file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
state.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::{MAX_COLOR_TARGETS, ColorSlot};
use core::state as s;
use core::state::{BlendValue, Comparison, CullFace, Equation,
Offset, RasterMethod, StencilOp, FrontFace};
use core::target::{ColorValue, Rect, Stencil};
use gl;
pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) {
let (gl_draw, gl_offset) = match method {
RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT),
RasterMethod::Line(width) => {
unsafe { gl.LineWidth(width as gl::types::GLfloat) };
(gl::LINE, gl::POLYGON_OFFSET_LINE)
},
RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL),
};
unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) };
match offset {
Some(Offset(factor, units)) => unsafe {
gl.Enable(gl_offset);
gl.PolygonOffset(factor as gl::types::GLfloat,
units as gl::types::GLfloat);
},
None => unsafe {
gl.Disable(gl_offset)
},
}
}
pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) {
unsafe {
gl.FrontFace(match r.front_face {
FrontFace::Clockwise => gl::CW,
FrontFace::CounterClockwise => gl::CCW,
})
};
match r.cull_face {
CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) },
CullFace::Front => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::FRONT);
}},
CullFace::Back => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::BACK);
}}
}
if!is_embedded {
bind_raster_method(gl, r.method, r.offset);
}
match r.samples {
Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) },
None => unsafe { gl.Disable(gl::MULTISAMPLE) },
}
}
pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) {
let attachments = [
gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2,
gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5,
gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8,
gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11,
gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14,
gl::COLOR_ATTACHMENT15];
let mut targets = [0; MAX_COLOR_TARGETS];
let mut count = 0;
let mut i = 0;
while mask >> i!= 0 {
if mask & (1<<i)!= 0 {
targets[count] = attachments[i];
count += 1;
}
i += 1;
}
unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) };
}
pub fn bind_viewport(gl: &gl::Gl, rect: Rect) {
unsafe { gl.Viewport(
rect.x as gl::types::GLint,
rect.y as gl::types::GLint,
rect.w as gl::types::GLint,
rect.h as gl::types::GLint
)};
}
pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) {
match rect {
Some(r) => { unsafe {
gl.Enable(gl::SCISSOR_TEST);
gl.Scissor(
r.x as gl::types::GLint,
r.y as gl::types::GLint,
r.w as gl::types::GLint,
r.h as gl::types::GLint
);
}},
None => unsafe { gl.Disable(gl::SCISSOR_TEST) },
}
}
pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum {
match cmp {
Comparison::Never => gl::NEVER,
Comparison::Less => gl::LESS,
Comparison::LessEqual => gl::LEQUAL,
Comparison::Equal => gl::EQUAL,
Comparison::GreaterEqual => gl::GEQUAL,
Comparison::Greater => gl::GREATER,
Comparison::NotEqual => gl::NOTEQUAL,
Comparison::Always => gl::ALWAYS,
}
}
pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) {
match depth {
&Some(ref d) => { unsafe {
gl.Enable(gl::DEPTH_TEST);
gl.DepthFunc(map_comparison(d.fun));
gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE});
}},
&None => unsafe { gl.Disable(gl::DEPTH_TEST) },
}
}
fn map_operation(op: StencilOp) -> gl::types::GLenum {
match op {
StencilOp::Keep => gl::KEEP,
StencilOp::Zero => gl::ZERO,
StencilOp::Replace => gl::REPLACE,
StencilOp::IncrementClamp=> gl::INCR,
StencilOp::IncrementWrap => gl::INCR_WRAP,
StencilOp::DecrementClamp=> gl::DECR,
StencilOp::DecrementWrap => gl::DECR_WRAP,
StencilOp::Invert => gl::INVERT,
}
}
pub fn bind_stencil(gl: &gl::Gl, stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) {
fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe {
gl.StencilFuncSeparate(face, map_comparison(side.fun),
ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint);
gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint);
gl.StencilOpSeparate(face, map_operation(side.op_fail),
map_operation(side.op_depth_fail), map_operation(side.op_pass));
}}
match stencil {
&Some(ref s) => {
unsafe { gl.Enable(gl::STENCIL_TEST) };
if cull!= CullFace::Front {
bind_side(gl, gl::FRONT, s.front, refs.0);
}
if cull!= CullFace::Back {
bind_side(gl, gl::BACK, s.back, refs.1);
}
}
&None => unsafe { gl.Disable(gl::STENCIL_TEST) },
}
}
fn map_equation(eq: Equation) -> gl::types::GLenum {
match eq {
Equation::Add => gl::FUNC_ADD,
Equation::Sub => gl::FUNC_SUBTRACT,
Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT,
Equation::Min => gl::MIN,
Equation::Max => gl::MAX,
}
}
fn map_factor(factor: s::Factor) -> gl::types::GLenum {
match factor {
s::Factor::Zero => gl::ZERO,
s::Factor::One => gl::ONE,
s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR,
s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR,
s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA,
s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA,
s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR,
s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR,
s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA,
s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA,
s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR,
s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR,
s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA,
s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA,
s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE,
}
}
pub fn bind_blend(gl: &gl::Gl, color: s::Color) {
match color.blend {
Some(b) => unsafe {
gl.Enable(gl::BLEND);
gl.BlendEquationSeparate(
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparate(
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disable(gl::BLEND);
},
};
unsafe { gl.ColorMask(
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() | else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn bind_blend_slot(gl: &gl::Gl, slot: ColorSlot, color: s::Color) {
let buf = slot as gl::types::GLuint;
match color.blend {
Some(b) => unsafe {
//Note: using ARB functions as they are more compatible
gl.Enablei(gl::BLEND, buf);
gl.BlendEquationSeparateiARB(buf,
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparateiARB(buf,
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disablei(gl::BLEND, buf);
},
};
unsafe { gl.ColorMaski(buf,
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn unlock_color_mask(gl: &gl::Gl) {
unsafe { gl.ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE) };
}
pub fn set_blend_color(gl: &gl::Gl, color: ColorValue) {
unsafe {
gl.BlendColor(color[0], color[1], color[2], color[3])
};
}
| {gl::FALSE} | conditional_block |
state.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::{MAX_COLOR_TARGETS, ColorSlot};
use core::state as s;
use core::state::{BlendValue, Comparison, CullFace, Equation,
Offset, RasterMethod, StencilOp, FrontFace};
use core::target::{ColorValue, Rect, Stencil};
use gl;
pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) {
let (gl_draw, gl_offset) = match method {
RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT),
RasterMethod::Line(width) => {
unsafe { gl.LineWidth(width as gl::types::GLfloat) };
(gl::LINE, gl::POLYGON_OFFSET_LINE)
},
RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL),
};
unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) };
match offset {
Some(Offset(factor, units)) => unsafe {
gl.Enable(gl_offset);
gl.PolygonOffset(factor as gl::types::GLfloat,
units as gl::types::GLfloat);
},
None => unsafe {
gl.Disable(gl_offset)
},
}
}
pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) {
unsafe {
gl.FrontFace(match r.front_face {
FrontFace::Clockwise => gl::CW,
FrontFace::CounterClockwise => gl::CCW,
})
};
match r.cull_face {
CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) },
CullFace::Front => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::FRONT);
}},
CullFace::Back => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::BACK);
}}
}
if!is_embedded {
bind_raster_method(gl, r.method, r.offset);
}
match r.samples {
Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) },
None => unsafe { gl.Disable(gl::MULTISAMPLE) },
}
}
pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) {
let attachments = [
gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2,
gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5,
gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8,
gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11,
gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14,
gl::COLOR_ATTACHMENT15];
let mut targets = [0; MAX_COLOR_TARGETS];
let mut count = 0;
let mut i = 0;
while mask >> i!= 0 {
if mask & (1<<i)!= 0 {
targets[count] = attachments[i];
count += 1;
}
i += 1;
}
unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) };
}
pub fn bind_viewport(gl: &gl::Gl, rect: Rect) {
unsafe { gl.Viewport(
rect.x as gl::types::GLint,
rect.y as gl::types::GLint,
rect.w as gl::types::GLint,
rect.h as gl::types::GLint
)};
}
pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) {
match rect {
Some(r) => { unsafe {
gl.Enable(gl::SCISSOR_TEST);
gl.Scissor(
r.x as gl::types::GLint,
r.y as gl::types::GLint,
r.w as gl::types::GLint,
r.h as gl::types::GLint
);
}},
None => unsafe { gl.Disable(gl::SCISSOR_TEST) },
}
}
pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum {
match cmp {
Comparison::Never => gl::NEVER,
Comparison::Less => gl::LESS,
Comparison::LessEqual => gl::LEQUAL,
Comparison::Equal => gl::EQUAL,
Comparison::GreaterEqual => gl::GEQUAL,
Comparison::Greater => gl::GREATER,
Comparison::NotEqual => gl::NOTEQUAL,
Comparison::Always => gl::ALWAYS,
}
}
pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) {
match depth {
&Some(ref d) => { unsafe {
gl.Enable(gl::DEPTH_TEST);
gl.DepthFunc(map_comparison(d.fun));
gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE});
}},
&None => unsafe { gl.Disable(gl::DEPTH_TEST) },
}
}
fn map_operation(op: StencilOp) -> gl::types::GLenum {
match op {
StencilOp::Keep => gl::KEEP,
StencilOp::Zero => gl::ZERO,
StencilOp::Replace => gl::REPLACE,
StencilOp::IncrementClamp=> gl::INCR,
StencilOp::IncrementWrap => gl::INCR_WRAP,
StencilOp::DecrementClamp=> gl::DECR,
StencilOp::DecrementWrap => gl::DECR_WRAP,
StencilOp::Invert => gl::INVERT,
}
}
pub fn bind_stencil(gl: &gl::Gl, stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) {
fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe {
gl.StencilFuncSeparate(face, map_comparison(side.fun),
ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint);
gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint);
gl.StencilOpSeparate(face, map_operation(side.op_fail),
map_operation(side.op_depth_fail), map_operation(side.op_pass));
}}
match stencil {
&Some(ref s) => {
unsafe { gl.Enable(gl::STENCIL_TEST) };
if cull!= CullFace::Front {
bind_side(gl, gl::FRONT, s.front, refs.0);
}
if cull!= CullFace::Back {
bind_side(gl, gl::BACK, s.back, refs.1);
}
}
&None => unsafe { gl.Disable(gl::STENCIL_TEST) },
}
}
fn map_equation(eq: Equation) -> gl::types::GLenum {
match eq {
Equation::Add => gl::FUNC_ADD,
Equation::Sub => gl::FUNC_SUBTRACT,
Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT,
Equation::Min => gl::MIN,
Equation::Max => gl::MAX,
}
}
fn map_factor(factor: s::Factor) -> gl::types::GLenum {
match factor {
s::Factor::Zero => gl::ZERO,
s::Factor::One => gl::ONE,
s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR,
s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR,
s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA,
s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA,
s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR,
s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR,
s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA,
s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA,
s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR,
s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR,
s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA,
s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA,
s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE,
}
}
pub fn | (gl: &gl::Gl, color: s::Color) {
match color.blend {
Some(b) => unsafe {
gl.Enable(gl::BLEND);
gl.BlendEquationSeparate(
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparate(
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disable(gl::BLEND);
},
};
unsafe { gl.ColorMask(
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn bind_blend_slot(gl: &gl::Gl, slot: ColorSlot, color: s::Color) {
let buf = slot as gl::types::GLuint;
match color.blend {
Some(b) => unsafe {
//Note: using ARB functions as they are more compatible
gl.Enablei(gl::BLEND, buf);
gl.BlendEquationSeparateiARB(buf,
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparateiARB(buf,
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disablei(gl::BLEND, buf);
},
};
unsafe { gl.ColorMaski(buf,
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn unlock_color_mask(gl: &gl::Gl) {
unsafe { gl.ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE) };
}
pub fn set_blend_color(gl: &gl::Gl, color: ColorValue) {
unsafe {
gl.BlendColor(color[0], color[1], color[2], color[3])
};
}
| bind_blend | identifier_name |
state.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::{MAX_COLOR_TARGETS, ColorSlot};
use core::state as s;
use core::state::{BlendValue, Comparison, CullFace, Equation,
Offset, RasterMethod, StencilOp, FrontFace};
use core::target::{ColorValue, Rect, Stencil};
use gl;
pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) {
let (gl_draw, gl_offset) = match method {
RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT),
RasterMethod::Line(width) => {
unsafe { gl.LineWidth(width as gl::types::GLfloat) };
(gl::LINE, gl::POLYGON_OFFSET_LINE)
},
RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL),
};
unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) };
match offset {
Some(Offset(factor, units)) => unsafe {
gl.Enable(gl_offset);
gl.PolygonOffset(factor as gl::types::GLfloat,
units as gl::types::GLfloat);
},
None => unsafe {
gl.Disable(gl_offset) | },
}
}
pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) {
unsafe {
gl.FrontFace(match r.front_face {
FrontFace::Clockwise => gl::CW,
FrontFace::CounterClockwise => gl::CCW,
})
};
match r.cull_face {
CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) },
CullFace::Front => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::FRONT);
}},
CullFace::Back => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::BACK);
}}
}
if!is_embedded {
bind_raster_method(gl, r.method, r.offset);
}
match r.samples {
Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) },
None => unsafe { gl.Disable(gl::MULTISAMPLE) },
}
}
pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) {
let attachments = [
gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2,
gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5,
gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8,
gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11,
gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14,
gl::COLOR_ATTACHMENT15];
let mut targets = [0; MAX_COLOR_TARGETS];
let mut count = 0;
let mut i = 0;
while mask >> i!= 0 {
if mask & (1<<i)!= 0 {
targets[count] = attachments[i];
count += 1;
}
i += 1;
}
unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) };
}
pub fn bind_viewport(gl: &gl::Gl, rect: Rect) {
unsafe { gl.Viewport(
rect.x as gl::types::GLint,
rect.y as gl::types::GLint,
rect.w as gl::types::GLint,
rect.h as gl::types::GLint
)};
}
pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) {
match rect {
Some(r) => { unsafe {
gl.Enable(gl::SCISSOR_TEST);
gl.Scissor(
r.x as gl::types::GLint,
r.y as gl::types::GLint,
r.w as gl::types::GLint,
r.h as gl::types::GLint
);
}},
None => unsafe { gl.Disable(gl::SCISSOR_TEST) },
}
}
pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum {
match cmp {
Comparison::Never => gl::NEVER,
Comparison::Less => gl::LESS,
Comparison::LessEqual => gl::LEQUAL,
Comparison::Equal => gl::EQUAL,
Comparison::GreaterEqual => gl::GEQUAL,
Comparison::Greater => gl::GREATER,
Comparison::NotEqual => gl::NOTEQUAL,
Comparison::Always => gl::ALWAYS,
}
}
pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) {
match depth {
&Some(ref d) => { unsafe {
gl.Enable(gl::DEPTH_TEST);
gl.DepthFunc(map_comparison(d.fun));
gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE});
}},
&None => unsafe { gl.Disable(gl::DEPTH_TEST) },
}
}
fn map_operation(op: StencilOp) -> gl::types::GLenum {
match op {
StencilOp::Keep => gl::KEEP,
StencilOp::Zero => gl::ZERO,
StencilOp::Replace => gl::REPLACE,
StencilOp::IncrementClamp=> gl::INCR,
StencilOp::IncrementWrap => gl::INCR_WRAP,
StencilOp::DecrementClamp=> gl::DECR,
StencilOp::DecrementWrap => gl::DECR_WRAP,
StencilOp::Invert => gl::INVERT,
}
}
pub fn bind_stencil(gl: &gl::Gl, stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) {
fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe {
gl.StencilFuncSeparate(face, map_comparison(side.fun),
ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint);
gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint);
gl.StencilOpSeparate(face, map_operation(side.op_fail),
map_operation(side.op_depth_fail), map_operation(side.op_pass));
}}
match stencil {
&Some(ref s) => {
unsafe { gl.Enable(gl::STENCIL_TEST) };
if cull!= CullFace::Front {
bind_side(gl, gl::FRONT, s.front, refs.0);
}
if cull!= CullFace::Back {
bind_side(gl, gl::BACK, s.back, refs.1);
}
}
&None => unsafe { gl.Disable(gl::STENCIL_TEST) },
}
}
fn map_equation(eq: Equation) -> gl::types::GLenum {
match eq {
Equation::Add => gl::FUNC_ADD,
Equation::Sub => gl::FUNC_SUBTRACT,
Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT,
Equation::Min => gl::MIN,
Equation::Max => gl::MAX,
}
}
fn map_factor(factor: s::Factor) -> gl::types::GLenum {
match factor {
s::Factor::Zero => gl::ZERO,
s::Factor::One => gl::ONE,
s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR,
s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR,
s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA,
s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA,
s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR,
s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR,
s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA,
s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA,
s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR,
s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR,
s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA,
s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA,
s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE,
}
}
pub fn bind_blend(gl: &gl::Gl, color: s::Color) {
match color.blend {
Some(b) => unsafe {
gl.Enable(gl::BLEND);
gl.BlendEquationSeparate(
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparate(
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disable(gl::BLEND);
},
};
unsafe { gl.ColorMask(
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn bind_blend_slot(gl: &gl::Gl, slot: ColorSlot, color: s::Color) {
let buf = slot as gl::types::GLuint;
match color.blend {
Some(b) => unsafe {
//Note: using ARB functions as they are more compatible
gl.Enablei(gl::BLEND, buf);
gl.BlendEquationSeparateiARB(buf,
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparateiARB(buf,
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disablei(gl::BLEND, buf);
},
};
unsafe { gl.ColorMaski(buf,
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn unlock_color_mask(gl: &gl::Gl) {
unsafe { gl.ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE) };
}
pub fn set_blend_color(gl: &gl::Gl, color: ColorValue) {
unsafe {
gl.BlendColor(color[0], color[1], color[2], color[3])
};
} | random_line_split |
|
state.rs | // Copyright 2014 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::{MAX_COLOR_TARGETS, ColorSlot};
use core::state as s;
use core::state::{BlendValue, Comparison, CullFace, Equation,
Offset, RasterMethod, StencilOp, FrontFace};
use core::target::{ColorValue, Rect, Stencil};
use gl;
pub fn bind_raster_method(gl: &gl::Gl, method: s::RasterMethod, offset: Option<s::Offset>) {
let (gl_draw, gl_offset) = match method {
RasterMethod::Point => (gl::POINT, gl::POLYGON_OFFSET_POINT),
RasterMethod::Line(width) => {
unsafe { gl.LineWidth(width as gl::types::GLfloat) };
(gl::LINE, gl::POLYGON_OFFSET_LINE)
},
RasterMethod::Fill => (gl::FILL, gl::POLYGON_OFFSET_FILL),
};
unsafe { gl.PolygonMode(gl::FRONT_AND_BACK, gl_draw) };
match offset {
Some(Offset(factor, units)) => unsafe {
gl.Enable(gl_offset);
gl.PolygonOffset(factor as gl::types::GLfloat,
units as gl::types::GLfloat);
},
None => unsafe {
gl.Disable(gl_offset)
},
}
}
pub fn bind_rasterizer(gl: &gl::Gl, r: &s::Rasterizer, is_embedded: bool) {
unsafe {
gl.FrontFace(match r.front_face {
FrontFace::Clockwise => gl::CW,
FrontFace::CounterClockwise => gl::CCW,
})
};
match r.cull_face {
CullFace::Nothing => unsafe { gl.Disable(gl::CULL_FACE) },
CullFace::Front => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::FRONT);
}},
CullFace::Back => { unsafe {
gl.Enable(gl::CULL_FACE);
gl.CullFace(gl::BACK);
}}
}
if!is_embedded {
bind_raster_method(gl, r.method, r.offset);
}
match r.samples {
Some(_) => unsafe { gl.Enable(gl::MULTISAMPLE) },
None => unsafe { gl.Disable(gl::MULTISAMPLE) },
}
}
pub fn bind_draw_color_buffers(gl: &gl::Gl, mask: usize) {
let attachments = [
gl::COLOR_ATTACHMENT0, gl::COLOR_ATTACHMENT1, gl::COLOR_ATTACHMENT2,
gl::COLOR_ATTACHMENT3, gl::COLOR_ATTACHMENT4, gl::COLOR_ATTACHMENT5,
gl::COLOR_ATTACHMENT6, gl::COLOR_ATTACHMENT7, gl::COLOR_ATTACHMENT8,
gl::COLOR_ATTACHMENT9, gl::COLOR_ATTACHMENT10, gl::COLOR_ATTACHMENT11,
gl::COLOR_ATTACHMENT12, gl::COLOR_ATTACHMENT13, gl::COLOR_ATTACHMENT14,
gl::COLOR_ATTACHMENT15];
let mut targets = [0; MAX_COLOR_TARGETS];
let mut count = 0;
let mut i = 0;
while mask >> i!= 0 {
if mask & (1<<i)!= 0 {
targets[count] = attachments[i];
count += 1;
}
i += 1;
}
unsafe { gl.DrawBuffers(count as gl::types::GLint, targets.as_ptr()) };
}
pub fn bind_viewport(gl: &gl::Gl, rect: Rect) {
unsafe { gl.Viewport(
rect.x as gl::types::GLint,
rect.y as gl::types::GLint,
rect.w as gl::types::GLint,
rect.h as gl::types::GLint
)};
}
pub fn bind_scissor(gl: &gl::Gl, rect: Option<Rect>) {
match rect {
Some(r) => { unsafe {
gl.Enable(gl::SCISSOR_TEST);
gl.Scissor(
r.x as gl::types::GLint,
r.y as gl::types::GLint,
r.w as gl::types::GLint,
r.h as gl::types::GLint
);
}},
None => unsafe { gl.Disable(gl::SCISSOR_TEST) },
}
}
pub fn map_comparison(cmp: Comparison) -> gl::types::GLenum {
match cmp {
Comparison::Never => gl::NEVER,
Comparison::Less => gl::LESS,
Comparison::LessEqual => gl::LEQUAL,
Comparison::Equal => gl::EQUAL,
Comparison::GreaterEqual => gl::GEQUAL,
Comparison::Greater => gl::GREATER,
Comparison::NotEqual => gl::NOTEQUAL,
Comparison::Always => gl::ALWAYS,
}
}
pub fn bind_depth(gl: &gl::Gl, depth: &Option<s::Depth>) {
match depth {
&Some(ref d) => { unsafe {
gl.Enable(gl::DEPTH_TEST);
gl.DepthFunc(map_comparison(d.fun));
gl.DepthMask(if d.write {gl::TRUE} else {gl::FALSE});
}},
&None => unsafe { gl.Disable(gl::DEPTH_TEST) },
}
}
fn map_operation(op: StencilOp) -> gl::types::GLenum {
match op {
StencilOp::Keep => gl::KEEP,
StencilOp::Zero => gl::ZERO,
StencilOp::Replace => gl::REPLACE,
StencilOp::IncrementClamp=> gl::INCR,
StencilOp::IncrementWrap => gl::INCR_WRAP,
StencilOp::DecrementClamp=> gl::DECR,
StencilOp::DecrementWrap => gl::DECR_WRAP,
StencilOp::Invert => gl::INVERT,
}
}
pub fn bind_stencil(gl: &gl::Gl, stencil: &Option<s::Stencil>, refs: (Stencil, Stencil), cull: s::CullFace) {
fn bind_side(gl: &gl::Gl, face: gl::types::GLenum, side: s::StencilSide, ref_value: Stencil) { unsafe {
gl.StencilFuncSeparate(face, map_comparison(side.fun),
ref_value as gl::types::GLint, side.mask_read as gl::types::GLuint);
gl.StencilMaskSeparate(face, side.mask_write as gl::types::GLuint);
gl.StencilOpSeparate(face, map_operation(side.op_fail),
map_operation(side.op_depth_fail), map_operation(side.op_pass));
}}
match stencil {
&Some(ref s) => {
unsafe { gl.Enable(gl::STENCIL_TEST) };
if cull!= CullFace::Front {
bind_side(gl, gl::FRONT, s.front, refs.0);
}
if cull!= CullFace::Back {
bind_side(gl, gl::BACK, s.back, refs.1);
}
}
&None => unsafe { gl.Disable(gl::STENCIL_TEST) },
}
}
fn map_equation(eq: Equation) -> gl::types::GLenum {
match eq {
Equation::Add => gl::FUNC_ADD,
Equation::Sub => gl::FUNC_SUBTRACT,
Equation::RevSub => gl::FUNC_REVERSE_SUBTRACT,
Equation::Min => gl::MIN,
Equation::Max => gl::MAX,
}
}
fn map_factor(factor: s::Factor) -> gl::types::GLenum |
pub fn bind_blend(gl: &gl::Gl, color: s::Color) {
match color.blend {
Some(b) => unsafe {
gl.Enable(gl::BLEND);
gl.BlendEquationSeparate(
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparate(
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disable(gl::BLEND);
},
};
unsafe { gl.ColorMask(
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn bind_blend_slot(gl: &gl::Gl, slot: ColorSlot, color: s::Color) {
let buf = slot as gl::types::GLuint;
match color.blend {
Some(b) => unsafe {
//Note: using ARB functions as they are more compatible
gl.Enablei(gl::BLEND, buf);
gl.BlendEquationSeparateiARB(buf,
map_equation(b.color.equation),
map_equation(b.alpha.equation)
);
gl.BlendFuncSeparateiARB(buf,
map_factor(b.color.source),
map_factor(b.color.destination),
map_factor(b.alpha.source),
map_factor(b.alpha.destination)
);
},
None => unsafe {
gl.Disablei(gl::BLEND, buf);
},
};
unsafe { gl.ColorMaski(buf,
if (color.mask & s::RED ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::GREEN).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::BLUE ).is_empty() {gl::FALSE} else {gl::TRUE},
if (color.mask & s::ALPHA).is_empty() {gl::FALSE} else {gl::TRUE}
)};
}
pub fn unlock_color_mask(gl: &gl::Gl) {
unsafe { gl.ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE) };
}
pub fn set_blend_color(gl: &gl::Gl, color: ColorValue) {
unsafe {
gl.BlendColor(color[0], color[1], color[2], color[3])
};
}
| {
match factor {
s::Factor::Zero => gl::ZERO,
s::Factor::One => gl::ONE,
s::Factor::ZeroPlus(BlendValue::SourceColor) => gl::SRC_COLOR,
s::Factor::OneMinus(BlendValue::SourceColor) => gl::ONE_MINUS_SRC_COLOR,
s::Factor::ZeroPlus(BlendValue::SourceAlpha) => gl::SRC_ALPHA,
s::Factor::OneMinus(BlendValue::SourceAlpha) => gl::ONE_MINUS_SRC_ALPHA,
s::Factor::ZeroPlus(BlendValue::DestColor) => gl::DST_COLOR,
s::Factor::OneMinus(BlendValue::DestColor) => gl::ONE_MINUS_DST_COLOR,
s::Factor::ZeroPlus(BlendValue::DestAlpha) => gl::DST_ALPHA,
s::Factor::OneMinus(BlendValue::DestAlpha) => gl::ONE_MINUS_DST_ALPHA,
s::Factor::ZeroPlus(BlendValue::ConstColor) => gl::CONSTANT_COLOR,
s::Factor::OneMinus(BlendValue::ConstColor) => gl::ONE_MINUS_CONSTANT_COLOR,
s::Factor::ZeroPlus(BlendValue::ConstAlpha) => gl::CONSTANT_ALPHA,
s::Factor::OneMinus(BlendValue::ConstAlpha) => gl::ONE_MINUS_CONSTANT_ALPHA,
s::Factor::SourceAlphaSaturated => gl::SRC_ALPHA_SATURATE,
}
} | identifier_body |
proxy.rs | // Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Runs hardware devices in child processes.
use std::fmt::{self, Display};
use std::time::Duration;
use std::{self, io};
use base::{error, net::UnixSeqpacket, AsRawDescriptor, RawDescriptor};
use libc::{self, pid_t};
use minijail::{self, Minijail};
use msg_socket::{MsgOnSocket, MsgReceiver, MsgSender, MsgSocket};
use crate::{BusAccessInfo, BusDevice};
/// Errors for proxy devices.
#[derive(Debug)]
pub enum Error {
ForkingJail(minijail::Error),
Io(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
ForkingJail(e) => write!(f, "Failed to fork jail process: {}", e),
Io(e) => write!(f, "IO error configuring proxy device {}.", e),
}
}
}
const SOCKET_TIMEOUT_MS: u64 = 2000;
#[derive(Debug, MsgOnSocket)]
enum Command {
Read {
len: u32,
info: BusAccessInfo,
},
Write {
len: u32,
info: BusAccessInfo,
data: [u8; 8],
},
ReadConfig(u32),
WriteConfig {
reg_idx: u32,
offset: u32,
len: u32,
data: [u8; 4],
},
Shutdown,
}
#[derive(MsgOnSocket)]
enum CommandResult {
Ok,
ReadResult([u8; 8]),
ReadConfigResult(u32),
}
fn child_proc<D: BusDevice>(sock: UnixSeqpacket, device: &mut D) {
let mut running = true;
let sock = MsgSocket::<CommandResult, Command>::new(sock);
while running {
let cmd = match sock.recv() {
Ok(cmd) => cmd,
Err(err) => {
error!("child device process failed recv: {}", err);
break;
}
};
let res = match cmd {
Command::Read { len, info } => {
let mut buffer = [0u8; 8];
device.read(info, &mut buffer[0..len as usize]);
sock.send(&CommandResult::ReadResult(buffer))
}
Command::Write { len, info, data } => {
let len = len as usize;
device.write(info, &data[0..len]);
// Command::Write does not have a result.
Ok(())
}
Command::ReadConfig(idx) => {
let val = device.config_register_read(idx as usize);
sock.send(&CommandResult::ReadConfigResult(val))
}
Command::WriteConfig {
reg_idx,
offset,
len,
data,
} => {
let len = len as usize;
device.config_register_write(reg_idx as usize, offset as u64, &data[0..len]);
// Command::WriteConfig does not have a result.
Ok(())
}
Command::Shutdown => {
running = false;
sock.send(&CommandResult::Ok)
}
};
if let Err(e) = res {
error!("child device process failed send: {}", e);
}
}
}
/// Wraps an inner `BusDevice` that is run inside a child process via fork.
///
/// Because forks are very unfriendly to destructors and all memory mappings and file descriptors
/// are inherited, this should be used as early as possible in the main process.
pub struct ProxyDevice {
sock: MsgSocket<Command, CommandResult>,
pid: pid_t,
debug_label: String,
}
impl ProxyDevice {
/// Takes the given device and isolates it into another process via fork before returning.
///
/// The forked process will automatically be terminated when this is dropped, so be sure to keep
/// a reference.
///
/// # Arguments
/// * `device` - The device to isolate to another process.
/// * `jail` - The jail to use for isolating the given device.
/// * `keep_rds` - File descriptors that will be kept open in the child.
pub fn new<D: BusDevice>(
mut device: D,
jail: &Minijail,
mut keep_rds: Vec<RawDescriptor>,
) -> Result<ProxyDevice> {
let debug_label = device.debug_label();
let (child_sock, parent_sock) = UnixSeqpacket::pair().map_err(Error::Io)?;
keep_rds.push(child_sock.as_raw_descriptor());
// Forking here is safe as long as the program is still single threaded.
let pid = unsafe {
match jail.fork(Some(&keep_rds)).map_err(Error::ForkingJail)? {
0 => {
device.on_sandboxed();
child_proc(child_sock, &mut device);
// We're explicitly not using std::process::exit here to avoid the cleanup of
// stdout/stderr globals. This can cause cascading panics and SIGILL if a worker
// thread attempts to log to stderr after at_exit handlers have been run.
// TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly
// defined.
//
// exit() is trivially safe.
//! Never returns
libc::exit(0);
}
p => p,
}
};
parent_sock
.set_write_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS)))
.map_err(Error::Io)?;
parent_sock
.set_read_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS)))
.map_err(Error::Io)?;
Ok(ProxyDevice {
sock: MsgSocket::<Command, CommandResult>::new(parent_sock),
pid,
debug_label,
})
}
pub fn pid(&self) -> pid_t {
self.pid
}
/// Send a command that does not expect a response from the child device process.
fn send_no_result(&self, cmd: &Command) {
let res = self.sock.send(cmd);
if let Err(e) = res {
error!(
"failed write to child device process {}: {}",
self.debug_label, e,
);
}
}
/// Send a command and read its response from the child device process.
fn sync_send(&self, cmd: &Command) -> Option<CommandResult> {
self.send_no_result(cmd);
match self.sock.recv() {
Err(e) => |
Ok(r) => Some(r),
}
}
}
impl BusDevice for ProxyDevice {
fn debug_label(&self) -> String {
self.debug_label.clone()
}
fn config_register_write(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
let len = data.len() as u32;
let mut buffer = [0u8; 4];
buffer[0..data.len()].clone_from_slice(data);
let reg_idx = reg_idx as u32;
let offset = offset as u32;
self.send_no_result(&Command::WriteConfig {
reg_idx,
offset,
len,
data: buffer,
});
}
fn config_register_read(&self, reg_idx: usize) -> u32 {
let res = self.sync_send(&Command::ReadConfig(reg_idx as u32));
if let Some(CommandResult::ReadConfigResult(val)) = res {
val
} else {
0
}
}
fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
let len = data.len() as u32;
if let Some(CommandResult::ReadResult(buffer)) =
self.sync_send(&Command::Read { len, info })
{
let len = data.len();
data.clone_from_slice(&buffer[0..len]);
}
}
fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
let mut buffer = [0u8; 8];
let len = data.len() as u32;
buffer[0..data.len()].clone_from_slice(data);
self.send_no_result(&Command::Write {
len,
info,
data: buffer,
});
}
}
impl Drop for ProxyDevice {
fn drop(&mut self) {
self.sync_send(&Command::Shutdown);
}
}
/// Note: These tests must be run with --test-threads=1 to allow minijail to fork
/// the process.
#[cfg(test)]
mod tests {
use super::*;
/// A simple test echo device that outputs the same u8 that was written to it.
struct EchoDevice {
data: u8,
config: u8,
}
impl EchoDevice {
fn new() -> EchoDevice {
EchoDevice { data: 0, config: 0 }
}
}
impl BusDevice for EchoDevice {
fn debug_label(&self) -> String {
"EchoDevice".to_owned()
}
fn write(&mut self, _info: BusAccessInfo, data: &[u8]) {
assert!(data.len() == 1);
self.data = data[0];
}
fn read(&mut self, _info: BusAccessInfo, data: &mut [u8]) {
assert!(data.len() == 1);
data[0] = self.data;
}
fn config_register_write(&mut self, _reg_idx: usize, _offset: u64, data: &[u8]) {
assert!(data.len() == 1);
self.config = data[0];
}
fn config_register_read(&self, _reg_idx: usize) -> u32 {
self.config as u32
}
}
fn new_proxied_echo_device() -> ProxyDevice {
let device = EchoDevice::new();
let keep_fds: Vec<RawDescriptor> = Vec::new();
let minijail = Minijail::new().unwrap();
ProxyDevice::new(device, &minijail, keep_fds).unwrap()
}
// TODO(b/173833661): Find a way to ensure these tests are run single-threaded.
#[test]
#[ignore]
fn test_debug_label() {
let proxy_device = new_proxied_echo_device();
assert_eq!(proxy_device.debug_label(), "EchoDevice");
}
#[test]
#[ignore]
fn test_proxied_read_write() {
let mut proxy_device = new_proxied_echo_device();
let address = BusAccessInfo {
offset: 0,
address: 0,
id: 0,
};
proxy_device.write(address, &[42]);
let mut read_buffer = [0];
proxy_device.read(address, &mut read_buffer);
assert_eq!(read_buffer, [42]);
}
#[test]
#[ignore]
fn test_proxied_config() {
let mut proxy_device = new_proxied_echo_device();
proxy_device.config_register_write(0, 0, &[42]);
assert_eq!(proxy_device.config_register_read(0), 42);
}
}
| {
error!(
"failed to read result of {:?} from child device process {}: {}",
cmd, self.debug_label, e,
);
None
} | conditional_block |
proxy.rs | // Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Runs hardware devices in child processes.
use std::fmt::{self, Display};
use std::time::Duration;
use std::{self, io};
use base::{error, net::UnixSeqpacket, AsRawDescriptor, RawDescriptor};
use libc::{self, pid_t};
use minijail::{self, Minijail};
use msg_socket::{MsgOnSocket, MsgReceiver, MsgSender, MsgSocket};
use crate::{BusAccessInfo, BusDevice};
/// Errors for proxy devices.
#[derive(Debug)]
pub enum Error {
ForkingJail(minijail::Error),
Io(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
ForkingJail(e) => write!(f, "Failed to fork jail process: {}", e),
Io(e) => write!(f, "IO error configuring proxy device {}.", e),
}
}
}
const SOCKET_TIMEOUT_MS: u64 = 2000;
#[derive(Debug, MsgOnSocket)]
enum Command {
Read {
len: u32,
info: BusAccessInfo,
},
Write {
len: u32,
info: BusAccessInfo,
data: [u8; 8],
},
ReadConfig(u32),
WriteConfig {
reg_idx: u32,
offset: u32,
len: u32,
data: [u8; 4],
},
Shutdown,
}
#[derive(MsgOnSocket)]
enum CommandResult {
Ok,
ReadResult([u8; 8]),
ReadConfigResult(u32),
}
fn child_proc<D: BusDevice>(sock: UnixSeqpacket, device: &mut D) {
let mut running = true;
let sock = MsgSocket::<CommandResult, Command>::new(sock);
while running {
let cmd = match sock.recv() {
Ok(cmd) => cmd,
Err(err) => {
error!("child device process failed recv: {}", err);
break;
}
};
let res = match cmd {
Command::Read { len, info } => {
let mut buffer = [0u8; 8];
device.read(info, &mut buffer[0..len as usize]);
sock.send(&CommandResult::ReadResult(buffer))
}
Command::Write { len, info, data } => {
let len = len as usize;
device.write(info, &data[0..len]);
// Command::Write does not have a result.
Ok(())
}
Command::ReadConfig(idx) => {
let val = device.config_register_read(idx as usize);
sock.send(&CommandResult::ReadConfigResult(val))
}
Command::WriteConfig {
reg_idx,
offset,
len,
data,
} => {
let len = len as usize;
device.config_register_write(reg_idx as usize, offset as u64, &data[0..len]);
// Command::WriteConfig does not have a result.
Ok(())
}
Command::Shutdown => {
running = false;
sock.send(&CommandResult::Ok)
}
};
if let Err(e) = res {
error!("child device process failed send: {}", e);
}
}
}
/// Wraps an inner `BusDevice` that is run inside a child process via fork.
///
/// Because forks are very unfriendly to destructors and all memory mappings and file descriptors
/// are inherited, this should be used as early as possible in the main process.
pub struct ProxyDevice {
sock: MsgSocket<Command, CommandResult>,
pid: pid_t,
debug_label: String,
}
impl ProxyDevice {
/// Takes the given device and isolates it into another process via fork before returning.
///
/// The forked process will automatically be terminated when this is dropped, so be sure to keep
/// a reference.
///
/// # Arguments
/// * `device` - The device to isolate to another process.
/// * `jail` - The jail to use for isolating the given device.
/// * `keep_rds` - File descriptors that will be kept open in the child.
pub fn new<D: BusDevice>(
mut device: D,
jail: &Minijail,
mut keep_rds: Vec<RawDescriptor>,
) -> Result<ProxyDevice> {
let debug_label = device.debug_label();
let (child_sock, parent_sock) = UnixSeqpacket::pair().map_err(Error::Io)?;
keep_rds.push(child_sock.as_raw_descriptor());
// Forking here is safe as long as the program is still single threaded.
let pid = unsafe {
match jail.fork(Some(&keep_rds)).map_err(Error::ForkingJail)? {
0 => {
device.on_sandboxed();
child_proc(child_sock, &mut device);
// We're explicitly not using std::process::exit here to avoid the cleanup of
// stdout/stderr globals. This can cause cascading panics and SIGILL if a worker
// thread attempts to log to stderr after at_exit handlers have been run.
// TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly
// defined.
//
// exit() is trivially safe.
//! Never returns
libc::exit(0);
}
p => p,
}
};
parent_sock
.set_write_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS)))
.map_err(Error::Io)?;
parent_sock
.set_read_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS)))
.map_err(Error::Io)?;
Ok(ProxyDevice {
sock: MsgSocket::<Command, CommandResult>::new(parent_sock),
pid,
debug_label,
})
}
pub fn pid(&self) -> pid_t {
self.pid
}
/// Send a command that does not expect a response from the child device process.
fn send_no_result(&self, cmd: &Command) {
let res = self.sock.send(cmd);
if let Err(e) = res {
error!(
"failed write to child device process {}: {}",
self.debug_label, e,
);
}
}
/// Send a command and read its response from the child device process.
fn sync_send(&self, cmd: &Command) -> Option<CommandResult> {
self.send_no_result(cmd);
match self.sock.recv() {
Err(e) => {
error!(
"failed to read result of {:?} from child device process {}: {}",
cmd, self.debug_label, e,
);
None
}
Ok(r) => Some(r),
}
}
}
impl BusDevice for ProxyDevice {
fn debug_label(&self) -> String {
self.debug_label.clone()
}
fn config_register_write(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
let len = data.len() as u32;
let mut buffer = [0u8; 4];
buffer[0..data.len()].clone_from_slice(data);
let reg_idx = reg_idx as u32;
let offset = offset as u32;
self.send_no_result(&Command::WriteConfig {
reg_idx,
offset,
len,
data: buffer,
});
}
fn config_register_read(&self, reg_idx: usize) -> u32 {
let res = self.sync_send(&Command::ReadConfig(reg_idx as u32));
if let Some(CommandResult::ReadConfigResult(val)) = res {
val
} else {
0
}
}
fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
let len = data.len() as u32;
if let Some(CommandResult::ReadResult(buffer)) =
self.sync_send(&Command::Read { len, info })
{
let len = data.len();
data.clone_from_slice(&buffer[0..len]);
}
}
fn | (&mut self, info: BusAccessInfo, data: &[u8]) {
let mut buffer = [0u8; 8];
let len = data.len() as u32;
buffer[0..data.len()].clone_from_slice(data);
self.send_no_result(&Command::Write {
len,
info,
data: buffer,
});
}
}
impl Drop for ProxyDevice {
fn drop(&mut self) {
self.sync_send(&Command::Shutdown);
}
}
/// Note: These tests must be run with --test-threads=1 to allow minijail to fork
/// the process.
#[cfg(test)]
mod tests {
use super::*;
/// A simple test echo device that outputs the same u8 that was written to it.
struct EchoDevice {
data: u8,
config: u8,
}
impl EchoDevice {
fn new() -> EchoDevice {
EchoDevice { data: 0, config: 0 }
}
}
impl BusDevice for EchoDevice {
fn debug_label(&self) -> String {
"EchoDevice".to_owned()
}
fn write(&mut self, _info: BusAccessInfo, data: &[u8]) {
assert!(data.len() == 1);
self.data = data[0];
}
fn read(&mut self, _info: BusAccessInfo, data: &mut [u8]) {
assert!(data.len() == 1);
data[0] = self.data;
}
fn config_register_write(&mut self, _reg_idx: usize, _offset: u64, data: &[u8]) {
assert!(data.len() == 1);
self.config = data[0];
}
fn config_register_read(&self, _reg_idx: usize) -> u32 {
self.config as u32
}
}
fn new_proxied_echo_device() -> ProxyDevice {
let device = EchoDevice::new();
let keep_fds: Vec<RawDescriptor> = Vec::new();
let minijail = Minijail::new().unwrap();
ProxyDevice::new(device, &minijail, keep_fds).unwrap()
}
// TODO(b/173833661): Find a way to ensure these tests are run single-threaded.
#[test]
#[ignore]
fn test_debug_label() {
let proxy_device = new_proxied_echo_device();
assert_eq!(proxy_device.debug_label(), "EchoDevice");
}
#[test]
#[ignore]
fn test_proxied_read_write() {
let mut proxy_device = new_proxied_echo_device();
let address = BusAccessInfo {
offset: 0,
address: 0,
id: 0,
};
proxy_device.write(address, &[42]);
let mut read_buffer = [0];
proxy_device.read(address, &mut read_buffer);
assert_eq!(read_buffer, [42]);
}
#[test]
#[ignore]
fn test_proxied_config() {
let mut proxy_device = new_proxied_echo_device();
proxy_device.config_register_write(0, 0, &[42]);
assert_eq!(proxy_device.config_register_read(0), 42);
}
}
| write | identifier_name |
proxy.rs | // Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Runs hardware devices in child processes.
use std::fmt::{self, Display};
use std::time::Duration;
use std::{self, io};
use base::{error, net::UnixSeqpacket, AsRawDescriptor, RawDescriptor};
use libc::{self, pid_t};
use minijail::{self, Minijail};
use msg_socket::{MsgOnSocket, MsgReceiver, MsgSender, MsgSocket};
use crate::{BusAccessInfo, BusDevice};
/// Errors for proxy devices.
#[derive(Debug)]
pub enum Error {
ForkingJail(minijail::Error),
Io(io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
ForkingJail(e) => write!(f, "Failed to fork jail process: {}", e),
Io(e) => write!(f, "IO error configuring proxy device {}.", e),
}
}
}
const SOCKET_TIMEOUT_MS: u64 = 2000;
#[derive(Debug, MsgOnSocket)]
enum Command {
Read {
len: u32,
info: BusAccessInfo,
},
Write {
len: u32,
info: BusAccessInfo,
data: [u8; 8],
},
ReadConfig(u32),
WriteConfig {
reg_idx: u32,
offset: u32,
len: u32,
data: [u8; 4],
},
Shutdown,
}
#[derive(MsgOnSocket)]
enum CommandResult {
Ok,
ReadResult([u8; 8]),
ReadConfigResult(u32),
}
fn child_proc<D: BusDevice>(sock: UnixSeqpacket, device: &mut D) {
let mut running = true;
let sock = MsgSocket::<CommandResult, Command>::new(sock);
while running {
let cmd = match sock.recv() {
Ok(cmd) => cmd,
Err(err) => {
error!("child device process failed recv: {}", err);
break;
}
};
let res = match cmd {
Command::Read { len, info } => {
let mut buffer = [0u8; 8];
device.read(info, &mut buffer[0..len as usize]);
sock.send(&CommandResult::ReadResult(buffer))
}
Command::Write { len, info, data } => {
let len = len as usize;
device.write(info, &data[0..len]);
// Command::Write does not have a result.
Ok(())
}
Command::ReadConfig(idx) => {
let val = device.config_register_read(idx as usize);
sock.send(&CommandResult::ReadConfigResult(val))
}
Command::WriteConfig {
reg_idx,
offset,
len,
data,
} => {
let len = len as usize;
device.config_register_write(reg_idx as usize, offset as u64, &data[0..len]);
// Command::WriteConfig does not have a result.
Ok(())
}
Command::Shutdown => {
running = false;
sock.send(&CommandResult::Ok)
}
};
if let Err(e) = res {
error!("child device process failed send: {}", e);
}
}
}
/// Wraps an inner `BusDevice` that is run inside a child process via fork.
///
/// Because forks are very unfriendly to destructors and all memory mappings and file descriptors
/// are inherited, this should be used as early as possible in the main process.
pub struct ProxyDevice {
sock: MsgSocket<Command, CommandResult>,
pid: pid_t,
debug_label: String,
}
impl ProxyDevice {
/// Takes the given device and isolates it into another process via fork before returning.
///
/// The forked process will automatically be terminated when this is dropped, so be sure to keep
/// a reference.
///
/// # Arguments
/// * `device` - The device to isolate to another process.
/// * `jail` - The jail to use for isolating the given device.
/// * `keep_rds` - File descriptors that will be kept open in the child.
pub fn new<D: BusDevice>(
mut device: D,
jail: &Minijail,
mut keep_rds: Vec<RawDescriptor>,
) -> Result<ProxyDevice> {
let debug_label = device.debug_label();
let (child_sock, parent_sock) = UnixSeqpacket::pair().map_err(Error::Io)?;
keep_rds.push(child_sock.as_raw_descriptor());
// Forking here is safe as long as the program is still single threaded.
let pid = unsafe {
match jail.fork(Some(&keep_rds)).map_err(Error::ForkingJail)? {
0 => {
device.on_sandboxed();
child_proc(child_sock, &mut device);
// We're explicitly not using std::process::exit here to avoid the cleanup of
// stdout/stderr globals. This can cause cascading panics and SIGILL if a worker
// thread attempts to log to stderr after at_exit handlers have been run.
// TODO(crbug.com/992494): Remove this once device shutdown ordering is clearly
// defined.
//
// exit() is trivially safe.
//! Never returns
libc::exit(0);
}
p => p,
}
};
parent_sock
.set_write_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS)))
.map_err(Error::Io)?;
parent_sock
.set_read_timeout(Some(Duration::from_millis(SOCKET_TIMEOUT_MS)))
.map_err(Error::Io)?;
Ok(ProxyDevice {
sock: MsgSocket::<Command, CommandResult>::new(parent_sock),
pid,
debug_label,
})
}
pub fn pid(&self) -> pid_t {
self.pid
}
/// Send a command that does not expect a response from the child device process.
fn send_no_result(&self, cmd: &Command) {
let res = self.sock.send(cmd);
if let Err(e) = res {
error!(
"failed write to child device process {}: {}",
self.debug_label, e,
);
}
}
/// Send a command and read its response from the child device process.
fn sync_send(&self, cmd: &Command) -> Option<CommandResult> {
self.send_no_result(cmd);
match self.sock.recv() {
Err(e) => {
error!(
"failed to read result of {:?} from child device process {}: {}",
cmd, self.debug_label, e,
);
None
}
Ok(r) => Some(r),
}
}
}
impl BusDevice for ProxyDevice {
fn debug_label(&self) -> String {
self.debug_label.clone()
}
fn config_register_write(&mut self, reg_idx: usize, offset: u64, data: &[u8]) {
let len = data.len() as u32;
let mut buffer = [0u8; 4];
buffer[0..data.len()].clone_from_slice(data);
let reg_idx = reg_idx as u32;
let offset = offset as u32;
self.send_no_result(&Command::WriteConfig {
reg_idx,
offset,
len,
data: buffer,
});
}
fn config_register_read(&self, reg_idx: usize) -> u32 {
let res = self.sync_send(&Command::ReadConfig(reg_idx as u32));
if let Some(CommandResult::ReadConfigResult(val)) = res {
val
} else {
0
}
}
fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
let len = data.len() as u32;
if let Some(CommandResult::ReadResult(buffer)) =
self.sync_send(&Command::Read { len, info })
{
let len = data.len();
data.clone_from_slice(&buffer[0..len]);
}
}
fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
let mut buffer = [0u8; 8];
let len = data.len() as u32;
buffer[0..data.len()].clone_from_slice(data);
self.send_no_result(&Command::Write {
len,
info,
data: buffer,
});
}
}
impl Drop for ProxyDevice {
fn drop(&mut self) {
self.sync_send(&Command::Shutdown);
}
}
/// Note: These tests must be run with --test-threads=1 to allow minijail to fork
/// the process.
#[cfg(test)]
mod tests {
use super::*;
/// A simple test echo device that outputs the same u8 that was written to it.
struct EchoDevice {
data: u8,
config: u8,
}
impl EchoDevice {
fn new() -> EchoDevice {
EchoDevice { data: 0, config: 0 }
}
}
impl BusDevice for EchoDevice {
fn debug_label(&self) -> String { | self.data = data[0];
}
fn read(&mut self, _info: BusAccessInfo, data: &mut [u8]) {
assert!(data.len() == 1);
data[0] = self.data;
}
fn config_register_write(&mut self, _reg_idx: usize, _offset: u64, data: &[u8]) {
assert!(data.len() == 1);
self.config = data[0];
}
fn config_register_read(&self, _reg_idx: usize) -> u32 {
self.config as u32
}
}
fn new_proxied_echo_device() -> ProxyDevice {
let device = EchoDevice::new();
let keep_fds: Vec<RawDescriptor> = Vec::new();
let minijail = Minijail::new().unwrap();
ProxyDevice::new(device, &minijail, keep_fds).unwrap()
}
// TODO(b/173833661): Find a way to ensure these tests are run single-threaded.
#[test]
#[ignore]
fn test_debug_label() {
let proxy_device = new_proxied_echo_device();
assert_eq!(proxy_device.debug_label(), "EchoDevice");
}
#[test]
#[ignore]
fn test_proxied_read_write() {
let mut proxy_device = new_proxied_echo_device();
let address = BusAccessInfo {
offset: 0,
address: 0,
id: 0,
};
proxy_device.write(address, &[42]);
let mut read_buffer = [0];
proxy_device.read(address, &mut read_buffer);
assert_eq!(read_buffer, [42]);
}
#[test]
#[ignore]
fn test_proxied_config() {
let mut proxy_device = new_proxied_echo_device();
proxy_device.config_register_write(0, 0, &[42]);
assert_eq!(proxy_device.config_register_read(0), 42);
}
} | "EchoDevice".to_owned()
}
fn write(&mut self, _info: BusAccessInfo, data: &[u8]) {
assert!(data.len() == 1); | random_line_split |
client.rs | // Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
use std::cmp::Ordering;
use std::sync::RwLock;
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
#[error("request: {0}")]
ReqwestError(reqwest::Error),
#[error("json: {0}")]
JsonError(serde_json::error::Error),
#[error("failed to parse version: {0}")]
VersionParseError(String),
#[error("{0}")]
StringError(String),
}
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
ClientError::ReqwestError(err)
}
}
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
ClientError::JsonError(err)
}
}
#[derive(Debug, Default)]
pub struct Client {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
pub version: RwLock<Option<Version>>,
}
impl Clone for Client {
fn clone(&self) -> Self {
let version = self.version.read().unwrap();
Self {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(version.clone()),
}
}
}
impl Client {
pub fn new(url: &str) -> Self {
Self {
url: url.to_string(),
..Default::default()
}
}
pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> {
let mut builder = reqwest::Client::builder();
if self.disable_certificate_validation {
builder = builder.danger_accept_invalid_certs(true);
}
builder.build()
}
pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.get(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.post(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.put(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
#[inline(always)]
pub async fn get_version(&self) -> Result<Version, ClientError> {
if let Ok(version) = self.version.read() {
if let Some(version) = &*version {
return Ok(version.clone());
}
}
let r = self.get("")?.send().await?;
let status_code = r.status();
if status_code!= StatusCode::OK {
let body = r.text().await?;
let err = format!("{} -- {}", status_code.as_u16(), body.trim());
return Err(ClientError::StringError(err));
}
let body = r.text().await?;
let response: super::ElasticResponse = serde_json::from_str(&body)?;
if let Some(error) = response.error {
return Err(ClientError::StringError(error.reason));
}
if response.version.is_none() {
return Err(ClientError::StringError(
"request for version did not return a version".to_string(),
));
}
let version = Version::parse(&response.version.unwrap().number)?;
let mut locked = self.version.write().unwrap();
*locked = Some(version.clone());
Ok(version)
}
pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> {
let path = format!("_template/{}", name);
let response = self.put(&path)?.body(template).send().await?;
if response.status().as_u16() == 200 {
return Ok(());
}
let body = response.text().await?;
return Err(ClientError::StringError(body));
}
pub async fn get_template(
&self,
name: &str,
) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> {
let path = format!("_template/{}", name);
let response = self.get(&path)?.send().await?;
if response.status() == reqwest::StatusCode::OK {
let template: serde_json::Value = response.json().await?;
return Ok(Some(template));
} else if response.status() == reqwest::StatusCode::NOT_FOUND {
return Ok(None);
}
return Err(format!("Failed to get template: {}", response.status()).into());
}
}
#[derive(Debug, Clone, Eq)]
pub struct Version {
pub version: String,
pub major: u64,
pub minor: u64,
pub patch: u64,
}
impl Version {
pub fn parse(s: &str) -> Result<Version, ClientError> {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
for (i, part) in s.split('.').enumerate() {
if i == 0 {
major = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 1 {
minor = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 2 { | .map_err(|_| ClientError::VersionParseError(s.to_string()))?;
}
}
let version = Version {
version: s.to_string(),
major,
minor,
patch,
};
Ok(version)
}
pub fn as_u64(&self) -> u64 {
(self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000)
}
}
impl Ord for Version {
fn cmp(&self, other: &Self) -> Ordering {
self.as_u64().cmp(&other.as_u64())
}
}
impl PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Version {
fn eq(&self, other: &Self) -> bool {
self.as_u64() == other.as_u64()
}
}
#[derive(Default, Debug)]
pub struct ClientBuilder {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
}
impl ClientBuilder {
pub fn new(url: &str) -> ClientBuilder {
ClientBuilder {
url: url.to_string(),
..ClientBuilder::default()
}
}
pub fn disable_certificate_validation(&mut self, yes: bool) -> &Self {
self.disable_certificate_validation = yes;
self
}
pub fn with_username(&mut self, username: &str) -> &Self {
self.username = Some(username.to_string());
self
}
pub fn with_password(&mut self, password: &str) -> &Self {
self.password = Some(password.to_string());
self
}
pub fn build(&self) -> Client {
Client {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(None),
}
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct BulkResponse {
pub errors: Option<bool>,
pub items: Option<Vec<serde_json::Value>>,
pub error: Option<serde_json::Value>,
#[serde(flatten)]
pub other: std::collections::HashMap<String, serde_json::Value>,
}
impl BulkResponse {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
pub fn has_error(&self) -> bool {
if let Some(errors) = self.errors {
return errors;
}
if self.error.is_some() {
return true;
}
return false;
}
pub fn first_error(&self) -> Option<String> {
if!self.has_error() {
return None;
}
if let Some(error) = &self.error {
return Some(error.to_string());
}
if let Some(items) = &self.items {
for item in items {
if let serde_json::Value::String(err) = &item["index"]["error"]["reason"] {
return Some(err.to_string());
}
}
}
None
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn test_version_compare() {
assert_eq!(
Version::parse("1.1.1").unwrap(),
Version::parse("1.1.1").unwrap()
);
assert!(Version::parse("7.7.0").unwrap() < Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() <= Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() == Version::parse("7.7.1").unwrap());
}
} | patch = part
.parse::<u64>() | random_line_split |
client.rs | // Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
use std::cmp::Ordering;
use std::sync::RwLock;
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
#[error("request: {0}")]
ReqwestError(reqwest::Error),
#[error("json: {0}")]
JsonError(serde_json::error::Error),
#[error("failed to parse version: {0}")]
VersionParseError(String),
#[error("{0}")]
StringError(String),
}
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
ClientError::ReqwestError(err)
}
}
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
ClientError::JsonError(err)
}
}
#[derive(Debug, Default)]
pub struct Client {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
pub version: RwLock<Option<Version>>,
}
impl Clone for Client {
fn clone(&self) -> Self {
let version = self.version.read().unwrap();
Self {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(version.clone()),
}
}
}
impl Client {
pub fn new(url: &str) -> Self {
Self {
url: url.to_string(),
..Default::default()
}
}
pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> {
let mut builder = reqwest::Client::builder();
if self.disable_certificate_validation {
builder = builder.danger_accept_invalid_certs(true);
}
builder.build()
}
pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.get(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.post(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.put(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
#[inline(always)]
pub async fn get_version(&self) -> Result<Version, ClientError> {
if let Ok(version) = self.version.read() {
if let Some(version) = &*version {
return Ok(version.clone());
}
}
let r = self.get("")?.send().await?;
let status_code = r.status();
if status_code!= StatusCode::OK {
let body = r.text().await?;
let err = format!("{} -- {}", status_code.as_u16(), body.trim());
return Err(ClientError::StringError(err));
}
let body = r.text().await?;
let response: super::ElasticResponse = serde_json::from_str(&body)?;
if let Some(error) = response.error {
return Err(ClientError::StringError(error.reason));
}
if response.version.is_none() {
return Err(ClientError::StringError(
"request for version did not return a version".to_string(),
));
}
let version = Version::parse(&response.version.unwrap().number)?;
let mut locked = self.version.write().unwrap();
*locked = Some(version.clone());
Ok(version)
}
pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> {
let path = format!("_template/{}", name);
let response = self.put(&path)?.body(template).send().await?;
if response.status().as_u16() == 200 {
return Ok(());
}
let body = response.text().await?;
return Err(ClientError::StringError(body));
}
pub async fn get_template(
&self,
name: &str,
) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> {
let path = format!("_template/{}", name);
let response = self.get(&path)?.send().await?;
if response.status() == reqwest::StatusCode::OK {
let template: serde_json::Value = response.json().await?;
return Ok(Some(template));
} else if response.status() == reqwest::StatusCode::NOT_FOUND {
return Ok(None);
}
return Err(format!("Failed to get template: {}", response.status()).into());
}
}
#[derive(Debug, Clone, Eq)]
pub struct Version {
pub version: String,
pub major: u64,
pub minor: u64,
pub patch: u64,
}
impl Version {
pub fn parse(s: &str) -> Result<Version, ClientError> {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
for (i, part) in s.split('.').enumerate() {
if i == 0 {
major = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 1 {
minor = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 2 {
patch = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
}
}
let version = Version {
version: s.to_string(),
major,
minor,
patch,
};
Ok(version)
}
pub fn as_u64(&self) -> u64 {
(self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000)
}
}
impl Ord for Version {
fn cmp(&self, other: &Self) -> Ordering {
self.as_u64().cmp(&other.as_u64())
}
}
impl PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Version {
fn eq(&self, other: &Self) -> bool {
self.as_u64() == other.as_u64()
}
}
#[derive(Default, Debug)]
pub struct ClientBuilder {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
}
impl ClientBuilder {
pub fn new(url: &str) -> ClientBuilder {
ClientBuilder {
url: url.to_string(),
..ClientBuilder::default()
}
}
pub fn | (&mut self, yes: bool) -> &Self {
self.disable_certificate_validation = yes;
self
}
pub fn with_username(&mut self, username: &str) -> &Self {
self.username = Some(username.to_string());
self
}
pub fn with_password(&mut self, password: &str) -> &Self {
self.password = Some(password.to_string());
self
}
pub fn build(&self) -> Client {
Client {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(None),
}
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct BulkResponse {
pub errors: Option<bool>,
pub items: Option<Vec<serde_json::Value>>,
pub error: Option<serde_json::Value>,
#[serde(flatten)]
pub other: std::collections::HashMap<String, serde_json::Value>,
}
impl BulkResponse {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
pub fn has_error(&self) -> bool {
if let Some(errors) = self.errors {
return errors;
}
if self.error.is_some() {
return true;
}
return false;
}
pub fn first_error(&self) -> Option<String> {
if!self.has_error() {
return None;
}
if let Some(error) = &self.error {
return Some(error.to_string());
}
if let Some(items) = &self.items {
for item in items {
if let serde_json::Value::String(err) = &item["index"]["error"]["reason"] {
return Some(err.to_string());
}
}
}
None
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn test_version_compare() {
assert_eq!(
Version::parse("1.1.1").unwrap(),
Version::parse("1.1.1").unwrap()
);
assert!(Version::parse("7.7.0").unwrap() < Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() <= Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() == Version::parse("7.7.1").unwrap());
}
}
| disable_certificate_validation | identifier_name |
client.rs | // Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
use std::cmp::Ordering;
use std::sync::RwLock;
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
#[error("request: {0}")]
ReqwestError(reqwest::Error),
#[error("json: {0}")]
JsonError(serde_json::error::Error),
#[error("failed to parse version: {0}")]
VersionParseError(String),
#[error("{0}")]
StringError(String),
}
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
ClientError::ReqwestError(err)
}
}
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
ClientError::JsonError(err)
}
}
#[derive(Debug, Default)]
pub struct Client {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
pub version: RwLock<Option<Version>>,
}
impl Clone for Client {
fn clone(&self) -> Self {
let version = self.version.read().unwrap();
Self {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(version.clone()),
}
}
}
impl Client {
pub fn new(url: &str) -> Self {
Self {
url: url.to_string(),
..Default::default()
}
}
pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> {
let mut builder = reqwest::Client::builder();
if self.disable_certificate_validation {
builder = builder.danger_accept_invalid_certs(true);
}
builder.build()
}
pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.get(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.post(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.put(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
#[inline(always)]
pub async fn get_version(&self) -> Result<Version, ClientError> {
if let Ok(version) = self.version.read() {
if let Some(version) = &*version {
return Ok(version.clone());
}
}
let r = self.get("")?.send().await?;
let status_code = r.status();
if status_code!= StatusCode::OK {
let body = r.text().await?;
let err = format!("{} -- {}", status_code.as_u16(), body.trim());
return Err(ClientError::StringError(err));
}
let body = r.text().await?;
let response: super::ElasticResponse = serde_json::from_str(&body)?;
if let Some(error) = response.error {
return Err(ClientError::StringError(error.reason));
}
if response.version.is_none() {
return Err(ClientError::StringError(
"request for version did not return a version".to_string(),
));
}
let version = Version::parse(&response.version.unwrap().number)?;
let mut locked = self.version.write().unwrap();
*locked = Some(version.clone());
Ok(version)
}
pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> {
let path = format!("_template/{}", name);
let response = self.put(&path)?.body(template).send().await?;
if response.status().as_u16() == 200 {
return Ok(());
}
let body = response.text().await?;
return Err(ClientError::StringError(body));
}
pub async fn get_template(
&self,
name: &str,
) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> {
let path = format!("_template/{}", name);
let response = self.get(&path)?.send().await?;
if response.status() == reqwest::StatusCode::OK {
let template: serde_json::Value = response.json().await?;
return Ok(Some(template));
} else if response.status() == reqwest::StatusCode::NOT_FOUND {
return Ok(None);
}
return Err(format!("Failed to get template: {}", response.status()).into());
}
}
#[derive(Debug, Clone, Eq)]
pub struct Version {
pub version: String,
pub major: u64,
pub minor: u64,
pub patch: u64,
}
impl Version {
pub fn parse(s: &str) -> Result<Version, ClientError> {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
for (i, part) in s.split('.').enumerate() {
if i == 0 {
major = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 1 {
minor = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 2 {
patch = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
}
}
let version = Version {
version: s.to_string(),
major,
minor,
patch,
};
Ok(version)
}
pub fn as_u64(&self) -> u64 {
(self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000)
}
}
impl Ord for Version {
fn cmp(&self, other: &Self) -> Ordering {
self.as_u64().cmp(&other.as_u64())
}
}
impl PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Version {
fn eq(&self, other: &Self) -> bool {
self.as_u64() == other.as_u64()
}
}
#[derive(Default, Debug)]
pub struct ClientBuilder {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
}
impl ClientBuilder {
pub fn new(url: &str) -> ClientBuilder {
ClientBuilder {
url: url.to_string(),
..ClientBuilder::default()
}
}
pub fn disable_certificate_validation(&mut self, yes: bool) -> &Self {
self.disable_certificate_validation = yes;
self
}
pub fn with_username(&mut self, username: &str) -> &Self {
self.username = Some(username.to_string());
self
}
pub fn with_password(&mut self, password: &str) -> &Self {
self.password = Some(password.to_string());
self
}
pub fn build(&self) -> Client {
Client {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(None),
}
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct BulkResponse {
pub errors: Option<bool>,
pub items: Option<Vec<serde_json::Value>>,
pub error: Option<serde_json::Value>,
#[serde(flatten)]
pub other: std::collections::HashMap<String, serde_json::Value>,
}
impl BulkResponse {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
pub fn has_error(&self) -> bool {
if let Some(errors) = self.errors {
return errors;
}
if self.error.is_some() {
return true;
}
return false;
}
pub fn first_error(&self) -> Option<String> |
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn test_version_compare() {
assert_eq!(
Version::parse("1.1.1").unwrap(),
Version::parse("1.1.1").unwrap()
);
assert!(Version::parse("7.7.0").unwrap() < Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() <= Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() == Version::parse("7.7.1").unwrap());
}
}
| {
if !self.has_error() {
return None;
}
if let Some(error) = &self.error {
return Some(error.to_string());
}
if let Some(items) = &self.items {
for item in items {
if let serde_json::Value::String(err) = &item["index"]["error"]["reason"] {
return Some(err.to_string());
}
}
}
None
} | identifier_body |
client.rs | // Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use reqwest::StatusCode;
use serde::Deserialize;
use serde::Serialize;
use std::cmp::Ordering;
use std::sync::RwLock;
#[derive(Debug, thiserror::Error)]
pub enum ClientError {
#[error("request: {0}")]
ReqwestError(reqwest::Error),
#[error("json: {0}")]
JsonError(serde_json::error::Error),
#[error("failed to parse version: {0}")]
VersionParseError(String),
#[error("{0}")]
StringError(String),
}
impl From<reqwest::Error> for ClientError {
fn from(err: reqwest::Error) -> Self {
ClientError::ReqwestError(err)
}
}
impl From<serde_json::error::Error> for ClientError {
fn from(err: serde_json::error::Error) -> Self {
ClientError::JsonError(err)
}
}
#[derive(Debug, Default)]
pub struct Client {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
pub version: RwLock<Option<Version>>,
}
impl Clone for Client {
fn clone(&self) -> Self {
let version = self.version.read().unwrap();
Self {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(version.clone()),
}
}
}
impl Client {
pub fn new(url: &str) -> Self {
Self {
url: url.to_string(),
..Default::default()
}
}
pub fn get_http_client(&self) -> Result<reqwest::Client, reqwest::Error> {
let mut builder = reqwest::Client::builder();
if self.disable_certificate_validation {
builder = builder.danger_accept_invalid_certs(true);
}
builder.build()
}
pub fn get(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.get(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else | ;
Ok(request)
}
pub fn post(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.post(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
pub fn put(&self, path: &str) -> Result<reqwest::RequestBuilder, reqwest::Error> {
let url = format!("{}/{}", self.url, path);
let request = self
.get_http_client()?
.put(&url)
.header("Content-Type", "application/json");
let request = if let Some(username) = &self.username {
request.basic_auth(username, self.password.clone())
} else {
request
};
Ok(request)
}
#[inline(always)]
pub async fn get_version(&self) -> Result<Version, ClientError> {
if let Ok(version) = self.version.read() {
if let Some(version) = &*version {
return Ok(version.clone());
}
}
let r = self.get("")?.send().await?;
let status_code = r.status();
if status_code!= StatusCode::OK {
let body = r.text().await?;
let err = format!("{} -- {}", status_code.as_u16(), body.trim());
return Err(ClientError::StringError(err));
}
let body = r.text().await?;
let response: super::ElasticResponse = serde_json::from_str(&body)?;
if let Some(error) = response.error {
return Err(ClientError::StringError(error.reason));
}
if response.version.is_none() {
return Err(ClientError::StringError(
"request for version did not return a version".to_string(),
));
}
let version = Version::parse(&response.version.unwrap().number)?;
let mut locked = self.version.write().unwrap();
*locked = Some(version.clone());
Ok(version)
}
pub async fn put_template(&self, name: &str, template: String) -> Result<(), ClientError> {
let path = format!("_template/{}", name);
let response = self.put(&path)?.body(template).send().await?;
if response.status().as_u16() == 200 {
return Ok(());
}
let body = response.text().await?;
return Err(ClientError::StringError(body));
}
pub async fn get_template(
&self,
name: &str,
) -> Result<Option<serde_json::Value>, Box<dyn std::error::Error>> {
let path = format!("_template/{}", name);
let response = self.get(&path)?.send().await?;
if response.status() == reqwest::StatusCode::OK {
let template: serde_json::Value = response.json().await?;
return Ok(Some(template));
} else if response.status() == reqwest::StatusCode::NOT_FOUND {
return Ok(None);
}
return Err(format!("Failed to get template: {}", response.status()).into());
}
}
#[derive(Debug, Clone, Eq)]
pub struct Version {
pub version: String,
pub major: u64,
pub minor: u64,
pub patch: u64,
}
impl Version {
pub fn parse(s: &str) -> Result<Version, ClientError> {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
for (i, part) in s.split('.').enumerate() {
if i == 0 {
major = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 1 {
minor = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
} else if i == 2 {
patch = part
.parse::<u64>()
.map_err(|_| ClientError::VersionParseError(s.to_string()))?;
}
}
let version = Version {
version: s.to_string(),
major,
minor,
patch,
};
Ok(version)
}
pub fn as_u64(&self) -> u64 {
(self.major * 1_000_000_000) + (self.minor * 1_000_000) + (self.patch * 1_000)
}
}
impl Ord for Version {
fn cmp(&self, other: &Self) -> Ordering {
self.as_u64().cmp(&other.as_u64())
}
}
impl PartialOrd for Version {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for Version {
fn eq(&self, other: &Self) -> bool {
self.as_u64() == other.as_u64()
}
}
#[derive(Default, Debug)]
pub struct ClientBuilder {
url: String,
disable_certificate_validation: bool,
username: Option<String>,
password: Option<String>,
}
impl ClientBuilder {
pub fn new(url: &str) -> ClientBuilder {
ClientBuilder {
url: url.to_string(),
..ClientBuilder::default()
}
}
pub fn disable_certificate_validation(&mut self, yes: bool) -> &Self {
self.disable_certificate_validation = yes;
self
}
pub fn with_username(&mut self, username: &str) -> &Self {
self.username = Some(username.to_string());
self
}
pub fn with_password(&mut self, password: &str) -> &Self {
self.password = Some(password.to_string());
self
}
pub fn build(&self) -> Client {
Client {
url: self.url.clone(),
disable_certificate_validation: self.disable_certificate_validation,
username: self.username.clone(),
password: self.password.clone(),
version: RwLock::new(None),
}
}
}
#[derive(Deserialize, Serialize, Debug)]
pub struct BulkResponse {
pub errors: Option<bool>,
pub items: Option<Vec<serde_json::Value>>,
pub error: Option<serde_json::Value>,
#[serde(flatten)]
pub other: std::collections::HashMap<String, serde_json::Value>,
}
impl BulkResponse {
pub fn is_error(&self) -> bool {
self.error.is_some()
}
pub fn has_error(&self) -> bool {
if let Some(errors) = self.errors {
return errors;
}
if self.error.is_some() {
return true;
}
return false;
}
pub fn first_error(&self) -> Option<String> {
if!self.has_error() {
return None;
}
if let Some(error) = &self.error {
return Some(error.to_string());
}
if let Some(items) = &self.items {
for item in items {
if let serde_json::Value::String(err) = &item["index"]["error"]["reason"] {
return Some(err.to_string());
}
}
}
None
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn test_version_compare() {
assert_eq!(
Version::parse("1.1.1").unwrap(),
Version::parse("1.1.1").unwrap()
);
assert!(Version::parse("7.7.0").unwrap() < Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() <= Version::parse("7.7.1").unwrap());
assert!(Version::parse("7.7.1").unwrap() == Version::parse("7.7.1").unwrap());
}
}
| {
request
} | conditional_block |
interactive.rs | event::{Event, KeyCode, KeyEvent, KeyModifiers},
style::{style, Attribute, Color, ContentStyle, Print, PrintStyledContent, StyledContent},
terminal, QueueableCommand,
};
use std::convert::TryFrom;
use std::io::{stdin, stdout};
use std::path::Path;
const HELP: &'static str = r##"y - apply this suggestion
n - do not apply the suggested correction
q - quit; do not stage this hunk or any of the remaining ones
d - do not apply this suggestion and skip the rest of the file
g - select a suggestion to go to
j - leave this hunk undecided, see next undecided hunk
J - leave this hunk undecided, see next hunk
e - manually edit the current hunk
? - print help
"##;
/// Helper strict to assure we leave the terminals raw mode
struct ScopedRaw;
impl ScopedRaw {
fn new() -> Result<Self> {
crossterm::terminal::enable_raw_mode()?;
Ok(Self)
}
}
impl Drop for ScopedRaw {
fn drop(&mut self) {
let _ = crossterm::terminal::disable_raw_mode();
}
}
/// In which direction we should progress
#[derive(Debug, Clone, Copy)]
enum Direction {
Forward,
Backward,
}
/// The user picked something. This is the pick representation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum Pick {
Replacement(BandAid),
/// Skip this suggestion and move on to the next suggestion.
Skip,
/// Jump to the previous suggestion.
Previous,
/// Print the help message and exit.
Help,
/// Skip the remaining fixes for the current file.
SkipFile,
/// Stop execution.
Quit,
/// continue as if whatever returned this was never called.
Nop,
}
/// Statefulness for the selection process
struct State<'s, 't>
where
't:'s,
{
/// Which suggestion is operated upon.
pub suggestion: &'s Suggestion<'t>,
/// The content the user provided for the suggestion, if any.
pub custom_replacement: String,
/// Which index to show as highlighted.
pub pick_idx: usize,
/// Total number of pickable slots.
pub n_items: usize,
}
impl<'s, 't> From<&'s Suggestion<'t>> for State<'s, 't> {
fn from(suggestion: &'s Suggestion<'t>) -> Self {
Self {
suggestion,
custom_replacement: String::new(),
pick_idx: 0usize,
// all items provided by the checkers plus the user provided
n_items: suggestion.replacements.len() + 1,
}
}
}
impl<'s, 't> State<'s, 't>
where
't:'s,
{
pub fn select_next(&mut self) {
self.pick_idx = (self.pick_idx + 1).rem_euclid(self.n_items);
}
pub fn select_previous(&mut self) {
self.pick_idx = (self.pick_idx + self.n_items - 1).rem_euclid(self.n_items);
}
pub fn select_custom(&mut self) {
self.pick_idx = self.n_items - 1;
}
/// the last one is user input
pub fn is_custom_entry(&self) -> bool |
pub fn to_bandaid(&self) -> BandAid {
if self.is_custom_entry() {
BandAid::from((
self.custom_replacement.clone(),
self.suggestion.span.clone(),
))
} else {
BandAid::try_from((self.suggestion, self.pick_idx))
.expect("Was constructed around this suggestion.")
}
}
}
/// The selection of used suggestion replacements
#[derive(Debug, Clone, Default)]
pub struct UserPicked {
pub bandaids: indexmap::IndexMap<PathBuf, Vec<BandAid>>,
}
impl UserPicked {
/// Count the number of suggestions accress file in total
pub fn count(&self) -> usize {
self.bandaids.iter().map(|(_path, vec)| vec.len()).sum()
}
/// Apply a single bandaid.
fn add_bandaid<'u>(&mut self, path: &Path, fix: BandAid) {
self.bandaids
.entry(path.to_owned())
.or_insert_with(|| Vec::with_capacity(10))
.push(fix);
}
/// Apply multiple bandaids.
#[allow(unused)]
fn add_bandaids<I>(&mut self, path: &Path, fixes: I)
where
I: IntoIterator<Item = BandAid>,
{
let iter = fixes.into_iter();
self.bandaids
.entry(path.to_owned())
.or_insert_with(|| Vec::with_capacity(iter.size_hint().0))
.extend(iter);
}
/// Provide a replacement that was not provided by the backend
fn custom_replacement(&self, state: &mut State, event: KeyEvent) -> Result<Pick> {
let KeyEvent { code, modifiers } = event;
match code {
KeyCode::Up => state.select_next(),
KeyCode::Down => state.select_previous(),
KeyCode::Enter => {
let bandaid = BandAid::new(&state.custom_replacement, &state.suggestion.span);
return Ok(Pick::Replacement(bandaid));
}
KeyCode::Esc => return Ok(Pick::Quit),
KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit),
KeyCode::Char(c) => state.custom_replacement.push(c), // @todo handle cursors and insert / delete mode
_ => {}
}
Ok(Pick::Nop)
}
/// only print the list of replacements to the user
// initial thougth was to show a horizontal list of replacements, navigate left/ right
// by using the arrow keys
//.. suggestion0 [suggestion1] suggestion2 suggestion3..
// arrow left
//.. suggestion1 [suggestion2] suggestion3 suggestion4..
// but now it's only a very simple list for now
fn print_replacements_list(&self, state: &State) -> Result<()> {
let mut stdout = stdout();
let tick = ContentStyle::new()
.foreground(Color::Green)
.attribute(Attribute::Bold);
let highlight = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Green)
.attribute(Attribute::Bold);
let others = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Blue);
let custom = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Yellow);
// render all replacements in a vertical list
stdout.queue(cursor::SavePosition).unwrap();
let _ = stdout.flush();
let active_idx = state.pick_idx;
let custom_content = if state.custom_replacement.is_empty() {
"..."
} else {
state.custom_replacement.as_str()
};
if state.n_items!= active_idx + 1 {
stdout
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
custom,
custom_content,
)))
.unwrap();
} else {
stdout
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(2))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(tick.clone(), '»')))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
custom,
custom_content,
)))
.unwrap();
}
let _ = stdout.flush();
state
.suggestion
.replacements
.iter()
.enumerate()
.for_each(|(idx, replacement)| {
let idx = idx as u16;
if idx!= active_idx as u16 {
// @todo figure out a way to deal with those errors better
stdout
//.queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap()
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
others.clone(),
replacement,
)))
.unwrap();
} else {
stdout
//.queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap()
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(2))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(tick.clone(), '»')))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
highlight.clone(),
replacement,
)))
.unwrap();
}
});
stdout.queue(cursor::RestorePosition).unwrap();
let _ = stdout.flush();
Ok(())
}
/// Wait for user input and process it into a `Pick` enum
fn user_input(&self, state: &mut State, running_idx: (usize, usize)) -> Result<Pick> {
{
let _guard = ScopedRaw::new();
let boring = ContentStyle::new()
.foreground(Color::Blue)
.attribute(Attribute::Bold);
let question = format!(
"({nth}/{of_n}) Apply this suggestion [y,n,q,a,d,j,e,?]?",
nth = running_idx.0 + 1,
of_n = running_idx.1
);
// a new suggestion, so prepare for the number of items that are visible
// and also overwrite the last lines of the regular print which would
// already contain the suggestions
stdout()
.queue(cursor::Hide)
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(cursor::MoveUp(5)) // erase the 5 last lines of suggestion print
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(boring, question)))
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap() // @todo deal with error conversion
.queue(terminal::ScrollUp((state.n_items) as u16))
.unwrap();
}
loop {
let mut guard = ScopedRaw::new();
self.print_replacements_list(state)?;
let event = match crossterm::event::read()
.map_err(|e| anyhow::anyhow!("Something unexpected happened on the CLI: {}", e))?
{
Event::Key(event) => event,
Event::Resize(..) => {
drop(guard);
continue;
}
sth => {
trace!("read() something other than a key: {:?}", sth);
break;
}
};
if state.is_custom_entry() {
drop(guard);
info!("Custom entry mode");
guard = ScopedRaw::new();
match self.custom_replacement(state, event)? {
Pick::Nop => continue,
other => return Ok(other),
}
}
drop(guard);
// print normally again
trace!("registered event: {:?}", &event);
let KeyEvent { code, modifiers } = event;
match code {
KeyCode::Up => state.select_next(),
KeyCode::Down => state.select_previous(),
KeyCode::Enter | KeyCode::Char('y') => {
let bandaid: BandAid = state.to_bandaid();
// @todo handle interactive intput for those where there are no suggestions
return Ok(Pick::Replacement(bandaid));
}
KeyCode::Char('n') => return Ok(Pick::Skip),
KeyCode::Char('j') => return Ok(Pick::Previous),
KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit),
KeyCode::Char('q') | KeyCode::Esc => return Ok(Pick::Quit),
KeyCode::Char('d') => return Ok(Pick::SkipFile),
KeyCode::Char('e') => {
// jump to the user input entry
state.select_custom();
}
KeyCode::Char('?') => return Ok(Pick::Help),
x => {
trace!("Unexpected input {:?}", x);
}
}
}
unreachable!("Unexpected return when dealing with user input")
}
pub(super) fn select_interactive<'s>(
suggestions_per_path: SuggestionSet<'s>,
_config: &Config,
) -> Result<Self> {
let mut picked = UserPicked::default();
trace!("Select the ones to actully use");
for (path, suggestions) in suggestions_per_path {
let count = suggestions.len();
println!("Path is {} and has {}", path.display(), count);
// @todo juck, uggly
let mut suggestions_it = suggestions.clone().into_iter().enumerate();
let mut direction = Direction::Forward;
loop {
let opt: Option<(usize, Suggestion)> = match direction {
Direction::Forward => suggestions_it.next(),
Direction::Backward => suggestions_it.next_back(), // FIXME @todo this is just plain wrong
};
trace!("next() ---> {:?}", &opt);
if opt.is_none() {
match direction {
Direction::Forward => {
trace!("completed file, continue to next");
break; // we completed this file, move on to the next
}
Direction::Backward => {
trace!("went back, now back at the beginning");
suggestions_it = suggestions.clone().into_iter().enumerate();
continue;
} // go to the start
}
}
let (idx, suggestion) = opt.expect("Must be Some(_)");
if suggestion.replacements.is_empty() {
trace!("Suggestion did not contain a replacement, skip");
continue;
}
println!("{}", suggestion);
let mut state = State::from(&suggestion);
let mut pick = picked.user_input(&mut state, (idx, count))?;
while pick == Pick::Help {
println!("{}", HELP);
pick = picked.user_input(&mut state, (idx, count))?;
}
match pick {
Pick::Quit => return Ok(picked),
Pick::SkipFile => break, // break the inner loop
Pick::Previous => {
unimplemented!("Requires a iterator which works bidrectionally")
}
Pick::Help => {
unreachable!("Help must not be reachable here, it is handled before")
}
Pick::Replacement(bandaid) => {
picked.add_bandaid(&path, bandaid);
| {
self.pick_idx + 1 == self.n_items
} | identifier_body |
interactive.rs | event::{Event, KeyCode, KeyEvent, KeyModifiers},
style::{style, Attribute, Color, ContentStyle, Print, PrintStyledContent, StyledContent},
terminal, QueueableCommand,
};
use std::convert::TryFrom;
use std::io::{stdin, stdout};
use std::path::Path;
const HELP: &'static str = r##"y - apply this suggestion
n - do not apply the suggested correction
q - quit; do not stage this hunk or any of the remaining ones
d - do not apply this suggestion and skip the rest of the file
g - select a suggestion to go to
j - leave this hunk undecided, see next undecided hunk
J - leave this hunk undecided, see next hunk
e - manually edit the current hunk
? - print help
"##;
/// Helper strict to assure we leave the terminals raw mode
struct ScopedRaw;
impl ScopedRaw {
fn new() -> Result<Self> {
crossterm::terminal::enable_raw_mode()?;
Ok(Self)
}
}
impl Drop for ScopedRaw {
fn drop(&mut self) {
let _ = crossterm::terminal::disable_raw_mode();
}
}
/// In which direction we should progress
#[derive(Debug, Clone, Copy)]
enum Direction {
Forward,
Backward,
}
/// The user picked something. This is the pick representation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum | {
Replacement(BandAid),
/// Skip this suggestion and move on to the next suggestion.
Skip,
/// Jump to the previous suggestion.
Previous,
/// Print the help message and exit.
Help,
/// Skip the remaining fixes for the current file.
SkipFile,
/// Stop execution.
Quit,
/// continue as if whatever returned this was never called.
Nop,
}
/// Statefulness for the selection process
struct State<'s, 't>
where
't:'s,
{
/// Which suggestion is operated upon.
pub suggestion: &'s Suggestion<'t>,
/// The content the user provided for the suggestion, if any.
pub custom_replacement: String,
/// Which index to show as highlighted.
pub pick_idx: usize,
/// Total number of pickable slots.
pub n_items: usize,
}
impl<'s, 't> From<&'s Suggestion<'t>> for State<'s, 't> {
fn from(suggestion: &'s Suggestion<'t>) -> Self {
Self {
suggestion,
custom_replacement: String::new(),
pick_idx: 0usize,
// all items provided by the checkers plus the user provided
n_items: suggestion.replacements.len() + 1,
}
}
}
impl<'s, 't> State<'s, 't>
where
't:'s,
{
pub fn select_next(&mut self) {
self.pick_idx = (self.pick_idx + 1).rem_euclid(self.n_items);
}
pub fn select_previous(&mut self) {
self.pick_idx = (self.pick_idx + self.n_items - 1).rem_euclid(self.n_items);
}
pub fn select_custom(&mut self) {
self.pick_idx = self.n_items - 1;
}
/// the last one is user input
pub fn is_custom_entry(&self) -> bool {
self.pick_idx + 1 == self.n_items
}
pub fn to_bandaid(&self) -> BandAid {
if self.is_custom_entry() {
BandAid::from((
self.custom_replacement.clone(),
self.suggestion.span.clone(),
))
} else {
BandAid::try_from((self.suggestion, self.pick_idx))
.expect("Was constructed around this suggestion.")
}
}
}
/// The selection of used suggestion replacements
#[derive(Debug, Clone, Default)]
pub struct UserPicked {
pub bandaids: indexmap::IndexMap<PathBuf, Vec<BandAid>>,
}
impl UserPicked {
/// Count the number of suggestions accress file in total
pub fn count(&self) -> usize {
self.bandaids.iter().map(|(_path, vec)| vec.len()).sum()
}
/// Apply a single bandaid.
fn add_bandaid<'u>(&mut self, path: &Path, fix: BandAid) {
self.bandaids
.entry(path.to_owned())
.or_insert_with(|| Vec::with_capacity(10))
.push(fix);
}
/// Apply multiple bandaids.
#[allow(unused)]
fn add_bandaids<I>(&mut self, path: &Path, fixes: I)
where
I: IntoIterator<Item = BandAid>,
{
let iter = fixes.into_iter();
self.bandaids
.entry(path.to_owned())
.or_insert_with(|| Vec::with_capacity(iter.size_hint().0))
.extend(iter);
}
/// Provide a replacement that was not provided by the backend
fn custom_replacement(&self, state: &mut State, event: KeyEvent) -> Result<Pick> {
let KeyEvent { code, modifiers } = event;
match code {
KeyCode::Up => state.select_next(),
KeyCode::Down => state.select_previous(),
KeyCode::Enter => {
let bandaid = BandAid::new(&state.custom_replacement, &state.suggestion.span);
return Ok(Pick::Replacement(bandaid));
}
KeyCode::Esc => return Ok(Pick::Quit),
KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit),
KeyCode::Char(c) => state.custom_replacement.push(c), // @todo handle cursors and insert / delete mode
_ => {}
}
Ok(Pick::Nop)
}
/// only print the list of replacements to the user
// initial thougth was to show a horizontal list of replacements, navigate left/ right
// by using the arrow keys
//.. suggestion0 [suggestion1] suggestion2 suggestion3..
// arrow left
//.. suggestion1 [suggestion2] suggestion3 suggestion4..
// but now it's only a very simple list for now
fn print_replacements_list(&self, state: &State) -> Result<()> {
let mut stdout = stdout();
let tick = ContentStyle::new()
.foreground(Color::Green)
.attribute(Attribute::Bold);
let highlight = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Green)
.attribute(Attribute::Bold);
let others = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Blue);
let custom = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Yellow);
// render all replacements in a vertical list
stdout.queue(cursor::SavePosition).unwrap();
let _ = stdout.flush();
let active_idx = state.pick_idx;
let custom_content = if state.custom_replacement.is_empty() {
"..."
} else {
state.custom_replacement.as_str()
};
if state.n_items!= active_idx + 1 {
stdout
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
custom,
custom_content,
)))
.unwrap();
} else {
stdout
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(2))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(tick.clone(), '»')))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
custom,
custom_content,
)))
.unwrap();
}
let _ = stdout.flush();
state
.suggestion
.replacements
.iter()
.enumerate()
.for_each(|(idx, replacement)| {
let idx = idx as u16;
if idx!= active_idx as u16 {
// @todo figure out a way to deal with those errors better
stdout
//.queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap()
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
others.clone(),
replacement,
)))
.unwrap();
} else {
stdout
//.queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap()
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(2))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(tick.clone(), '»')))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
highlight.clone(),
replacement,
)))
.unwrap();
}
});
stdout.queue(cursor::RestorePosition).unwrap();
let _ = stdout.flush();
Ok(())
}
/// Wait for user input and process it into a `Pick` enum
fn user_input(&self, state: &mut State, running_idx: (usize, usize)) -> Result<Pick> {
{
let _guard = ScopedRaw::new();
let boring = ContentStyle::new()
.foreground(Color::Blue)
.attribute(Attribute::Bold);
let question = format!(
"({nth}/{of_n}) Apply this suggestion [y,n,q,a,d,j,e,?]?",
nth = running_idx.0 + 1,
of_n = running_idx.1
);
// a new suggestion, so prepare for the number of items that are visible
// and also overwrite the last lines of the regular print which would
// already contain the suggestions
stdout()
.queue(cursor::Hide)
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(cursor::MoveUp(5)) // erase the 5 last lines of suggestion print
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(boring, question)))
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap() // @todo deal with error conversion
.queue(terminal::ScrollUp((state.n_items) as u16))
.unwrap();
}
loop {
let mut guard = ScopedRaw::new();
self.print_replacements_list(state)?;
let event = match crossterm::event::read()
.map_err(|e| anyhow::anyhow!("Something unexpected happened on the CLI: {}", e))?
{
Event::Key(event) => event,
Event::Resize(..) => {
drop(guard);
continue;
}
sth => {
trace!("read() something other than a key: {:?}", sth);
break;
}
};
if state.is_custom_entry() {
drop(guard);
info!("Custom entry mode");
guard = ScopedRaw::new();
match self.custom_replacement(state, event)? {
Pick::Nop => continue,
other => return Ok(other),
}
}
drop(guard);
// print normally again
trace!("registered event: {:?}", &event);
let KeyEvent { code, modifiers } = event;
match code {
KeyCode::Up => state.select_next(),
KeyCode::Down => state.select_previous(),
KeyCode::Enter | KeyCode::Char('y') => {
let bandaid: BandAid = state.to_bandaid();
// @todo handle interactive intput for those where there are no suggestions
return Ok(Pick::Replacement(bandaid));
}
KeyCode::Char('n') => return Ok(Pick::Skip),
KeyCode::Char('j') => return Ok(Pick::Previous),
KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit),
KeyCode::Char('q') | KeyCode::Esc => return Ok(Pick::Quit),
KeyCode::Char('d') => return Ok(Pick::SkipFile),
KeyCode::Char('e') => {
// jump to the user input entry
state.select_custom();
}
KeyCode::Char('?') => return Ok(Pick::Help),
x => {
trace!("Unexpected input {:?}", x);
}
}
}
unreachable!("Unexpected return when dealing with user input")
}
pub(super) fn select_interactive<'s>(
suggestions_per_path: SuggestionSet<'s>,
_config: &Config,
) -> Result<Self> {
let mut picked = UserPicked::default();
trace!("Select the ones to actully use");
for (path, suggestions) in suggestions_per_path {
let count = suggestions.len();
println!("Path is {} and has {}", path.display(), count);
// @todo juck, uggly
let mut suggestions_it = suggestions.clone().into_iter().enumerate();
let mut direction = Direction::Forward;
loop {
let opt: Option<(usize, Suggestion)> = match direction {
Direction::Forward => suggestions_it.next(),
Direction::Backward => suggestions_it.next_back(), // FIXME @todo this is just plain wrong
};
trace!("next() ---> {:?}", &opt);
if opt.is_none() {
match direction {
Direction::Forward => {
trace!("completed file, continue to next");
break; // we completed this file, move on to the next
}
Direction::Backward => {
trace!("went back, now back at the beginning");
suggestions_it = suggestions.clone().into_iter().enumerate();
continue;
} // go to the start
}
}
let (idx, suggestion) = opt.expect("Must be Some(_)");
if suggestion.replacements.is_empty() {
trace!("Suggestion did not contain a replacement, skip");
continue;
}
println!("{}", suggestion);
let mut state = State::from(&suggestion);
let mut pick = picked.user_input(&mut state, (idx, count))?;
while pick == Pick::Help {
println!("{}", HELP);
pick = picked.user_input(&mut state, (idx, count))?;
}
match pick {
Pick::Quit => return Ok(picked),
Pick::SkipFile => break, // break the inner loop
Pick::Previous => {
unimplemented!("Requires a iterator which works bidrectionally")
}
Pick::Help => {
unreachable!("Help must not be reachable here, it is handled before")
}
Pick::Replacement(bandaid) => {
picked.add_bandaid(&path, bandaid);
| Pick | identifier_name |
interactive.rs | use crossterm;
use crossterm::{
cursor,
event::{Event, KeyCode, KeyEvent, KeyModifiers},
style::{style, Attribute, Color, ContentStyle, Print, PrintStyledContent, StyledContent},
terminal, QueueableCommand,
};
use std::convert::TryFrom;
use std::io::{stdin, stdout};
use std::path::Path;
const HELP: &'static str = r##"y - apply this suggestion
n - do not apply the suggested correction
q - quit; do not stage this hunk or any of the remaining ones
d - do not apply this suggestion and skip the rest of the file
g - select a suggestion to go to
j - leave this hunk undecided, see next undecided hunk
J - leave this hunk undecided, see next hunk
e - manually edit the current hunk
? - print help
"##;
/// Helper strict to assure we leave the terminals raw mode
struct ScopedRaw;
impl ScopedRaw {
fn new() -> Result<Self> {
crossterm::terminal::enable_raw_mode()?;
Ok(Self)
}
}
impl Drop for ScopedRaw {
fn drop(&mut self) {
let _ = crossterm::terminal::disable_raw_mode();
}
}
/// In which direction we should progress
#[derive(Debug, Clone, Copy)]
enum Direction {
Forward,
Backward,
}
/// The user picked something. This is the pick representation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum Pick {
Replacement(BandAid),
/// Skip this suggestion and move on to the next suggestion.
Skip,
/// Jump to the previous suggestion.
Previous,
/// Print the help message and exit.
Help,
/// Skip the remaining fixes for the current file.
SkipFile,
/// Stop execution.
Quit,
/// continue as if whatever returned this was never called.
Nop,
}
/// Statefulness for the selection process
struct State<'s, 't>
where
't:'s,
{
/// Which suggestion is operated upon.
pub suggestion: &'s Suggestion<'t>,
/// The content the user provided for the suggestion, if any.
pub custom_replacement: String,
/// Which index to show as highlighted.
pub pick_idx: usize,
/// Total number of pickable slots.
pub n_items: usize,
}
impl<'s, 't> From<&'s Suggestion<'t>> for State<'s, 't> {
fn from(suggestion: &'s Suggestion<'t>) -> Self {
Self {
suggestion,
custom_replacement: String::new(),
pick_idx: 0usize,
// all items provided by the checkers plus the user provided
n_items: suggestion.replacements.len() + 1,
}
}
}
impl<'s, 't> State<'s, 't>
where
't:'s,
{
pub fn select_next(&mut self) {
self.pick_idx = (self.pick_idx + 1).rem_euclid(self.n_items);
}
pub fn select_previous(&mut self) {
self.pick_idx = (self.pick_idx + self.n_items - 1).rem_euclid(self.n_items);
}
pub fn select_custom(&mut self) {
self.pick_idx = self.n_items - 1;
}
/// the last one is user input
pub fn is_custom_entry(&self) -> bool {
self.pick_idx + 1 == self.n_items
}
pub fn to_bandaid(&self) -> BandAid {
if self.is_custom_entry() {
BandAid::from((
self.custom_replacement.clone(),
self.suggestion.span.clone(),
))
} else {
BandAid::try_from((self.suggestion, self.pick_idx))
.expect("Was constructed around this suggestion.")
}
}
}
/// The selection of used suggestion replacements
#[derive(Debug, Clone, Default)]
pub struct UserPicked {
pub bandaids: indexmap::IndexMap<PathBuf, Vec<BandAid>>,
}
impl UserPicked {
/// Count the number of suggestions accress file in total
pub fn count(&self) -> usize {
self.bandaids.iter().map(|(_path, vec)| vec.len()).sum()
}
/// Apply a single bandaid.
fn add_bandaid<'u>(&mut self, path: &Path, fix: BandAid) {
self.bandaids
.entry(path.to_owned())
.or_insert_with(|| Vec::with_capacity(10))
.push(fix);
}
/// Apply multiple bandaids.
#[allow(unused)]
fn add_bandaids<I>(&mut self, path: &Path, fixes: I)
where
I: IntoIterator<Item = BandAid>,
{
let iter = fixes.into_iter();
self.bandaids
.entry(path.to_owned())
.or_insert_with(|| Vec::with_capacity(iter.size_hint().0))
.extend(iter);
}
/// Provide a replacement that was not provided by the backend
fn custom_replacement(&self, state: &mut State, event: KeyEvent) -> Result<Pick> {
let KeyEvent { code, modifiers } = event;
match code {
KeyCode::Up => state.select_next(),
KeyCode::Down => state.select_previous(),
KeyCode::Enter => {
let bandaid = BandAid::new(&state.custom_replacement, &state.suggestion.span);
return Ok(Pick::Replacement(bandaid));
}
KeyCode::Esc => return Ok(Pick::Quit),
KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit),
KeyCode::Char(c) => state.custom_replacement.push(c), // @todo handle cursors and insert / delete mode
_ => {}
}
Ok(Pick::Nop)
}
/// only print the list of replacements to the user
// initial thougth was to show a horizontal list of replacements, navigate left/ right
// by using the arrow keys
//.. suggestion0 [suggestion1] suggestion2 suggestion3..
// arrow left
//.. suggestion1 [suggestion2] suggestion3 suggestion4..
// but now it's only a very simple list for now
fn print_replacements_list(&self, state: &State) -> Result<()> {
let mut stdout = stdout();
let tick = ContentStyle::new()
.foreground(Color::Green)
.attribute(Attribute::Bold);
let highlight = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Green)
.attribute(Attribute::Bold);
let others = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Blue);
let custom = ContentStyle::new()
.background(Color::Black)
.foreground(Color::Yellow);
// render all replacements in a vertical list
stdout.queue(cursor::SavePosition).unwrap();
let _ = stdout.flush();
let active_idx = state.pick_idx;
let custom_content = if state.custom_replacement.is_empty() {
"..."
} else {
state.custom_replacement.as_str()
};
if state.n_items!= active_idx + 1 {
stdout
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
custom,
custom_content,
)))
.unwrap();
} else {
stdout
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(2))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(tick.clone(), '»')))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
custom,
custom_content,
)))
.unwrap();
}
let _ = stdout.flush();
state
.suggestion
.replacements
.iter()
.enumerate()
.for_each(|(idx, replacement)| {
let idx = idx as u16;
if idx!= active_idx as u16 {
// @todo figure out a way to deal with those errors better
stdout
//.queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap()
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
others.clone(),
replacement,
)))
.unwrap();
} else {
stdout
//.queue(cursor::MoveTo(start.0 + idx, start.1)).unwrap()
.queue(cursor::MoveUp(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(2))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(tick.clone(), '»')))
.unwrap()
.queue(cursor::MoveToColumn(4))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(
highlight.clone(),
replacement,
)))
.unwrap();
}
});
stdout.queue(cursor::RestorePosition).unwrap();
let _ = stdout.flush();
Ok(())
}
/// Wait for user input and process it into a `Pick` enum
fn user_input(&self, state: &mut State, running_idx: (usize, usize)) -> Result<Pick> {
{
let _guard = ScopedRaw::new();
let boring = ContentStyle::new()
.foreground(Color::Blue)
.attribute(Attribute::Bold);
let question = format!(
"({nth}/{of_n}) Apply this suggestion [y,n,q,a,d,j,e,?]?",
nth = running_idx.0 + 1,
of_n = running_idx.1
);
// a new suggestion, so prepare for the number of items that are visible
// and also overwrite the last lines of the regular print which would
// already contain the suggestions
stdout()
.queue(cursor::Hide)
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(cursor::MoveUp(5)) // erase the 5 last lines of suggestion print
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(PrintStyledContent(StyledContent::new(boring, question)))
.unwrap()
.queue(cursor::MoveToColumn(0))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap()
.queue(cursor::MoveDown(1))
.unwrap()
.queue(terminal::Clear(terminal::ClearType::CurrentLine))
.unwrap() // @todo deal with error conversion
.queue(terminal::ScrollUp((state.n_items) as u16))
.unwrap();
}
loop {
let mut guard = ScopedRaw::new();
self.print_replacements_list(state)?;
let event = match crossterm::event::read()
.map_err(|e| anyhow::anyhow!("Something unexpected happened on the CLI: {}", e))?
{
Event::Key(event) => event,
Event::Resize(..) => {
drop(guard);
continue;
}
sth => {
trace!("read() something other than a key: {:?}", sth);
break;
}
};
if state.is_custom_entry() {
drop(guard);
info!("Custom entry mode");
guard = ScopedRaw::new();
match self.custom_replacement(state, event)? {
Pick::Nop => continue,
other => return Ok(other),
}
}
drop(guard);
// print normally again
trace!("registered event: {:?}", &event);
let KeyEvent { code, modifiers } = event;
match code {
KeyCode::Up => state.select_next(),
KeyCode::Down => state.select_previous(),
KeyCode::Enter | KeyCode::Char('y') => {
let bandaid: BandAid = state.to_bandaid();
// @todo handle interactive intput for those where there are no suggestions
return Ok(Pick::Replacement(bandaid));
}
KeyCode::Char('n') => return Ok(Pick::Skip),
KeyCode::Char('j') => return Ok(Pick::Previous),
KeyCode::Char('c') if modifiers == KeyModifiers::CONTROL => return Ok(Pick::Quit),
KeyCode::Char('q') | KeyCode::Esc => return Ok(Pick::Quit),
KeyCode::Char('d') => return Ok(Pick::SkipFile),
KeyCode::Char('e') => {
// jump to the user input entry
state.select_custom();
}
KeyCode::Char('?') => return Ok(Pick::Help),
x => {
trace!("Unexpected input {:?}", x);
}
}
}
unreachable!("Unexpected return when dealing with user input")
}
pub(super) fn select_interactive<'s>(
suggestions_per_path: SuggestionSet<'s>,
_config: &Config,
) -> Result<Self> {
let mut picked = UserPicked::default();
trace!("Select the ones to actully use");
for (path, suggestions) in suggestions_per_path {
let count = suggestions.len();
println!("Path is {} and has {}", path.display(), count);
// @todo juck, uggly
let mut suggestions_it = suggestions.clone().into_iter().enumerate();
let mut direction = Direction::Forward;
loop {
let opt: Option<(usize, Suggestion)> = match direction {
Direction::Forward => suggestions_it.next(),
Direction::Backward => suggestions_it.next_back(), // FIXME @todo this is just plain wrong
};
trace!("next() ---> {:?}", &opt);
if opt.is_none() {
match direction {
Direction::Forward => {
trace!("completed file, continue to next");
break; // we completed this file, move on to the next
}
Direction::Backward => {
trace!("went back, now back at the beginning");
suggestions_it = suggestions.clone().into_iter().enumerate();
continue;
} // go to the start
}
}
let (idx, suggestion) = opt.expect("Must be Some(_)");
if suggestion.replacements.is_empty() {
trace!("Suggestion did not contain a replacement, skip");
continue;
}
println!("{}", suggestion);
let mut state = State::from(&suggestion);
let mut pick = picked.user_input(&mut state, (idx, count))?;
while pick == Pick::Help {
println!("{}", HELP);
pick = picked.user_input(&mut state, (idx, count))?;
}
match pick {
Pick::Quit => return Ok(picked),
Pick::SkipFile => break, // break the inner loop
Pick::Previous => {
unimplemented!("Requires a iterator which works bidrectionally")
}
Pick::Help => {
unreachable!("Help must not be reachable here, it is handled before")
}
Pick::Replacement(bandaid) => {
| //!
//! The result of that pick is a bandaid.
use super::*;
| random_line_split |
|
hackc.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod assemble;
mod cmp_unit;
mod compile;
mod crc;
mod expr_trees;
mod facts;
mod parse;
mod util;
mod verify;
use ::compile::EnvFlags;
use ::compile::HHBCFlags;
use ::compile::NativeEnv;
use ::compile::ParserFlags;
use anyhow::Result;
use byte_unit::Byte;
use clap::Parser;
use hhvm_options::HhvmOptions;
use oxidized::decl_parser_options::DeclParserOptions;
use oxidized::relative_path::RelativePath;
use oxidized::relative_path::{self};
use std::io::BufRead;
use std::path::Path;
use std::path::PathBuf;
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct Opts {
#[clap(subcommand)]
command: Option<Command>,
/// Runs in daemon mode for testing purposes. Do not rely on for production
#[clap(long)]
daemon: bool,
#[clap(flatten)]
flag_commands: FlagCommands,
#[clap(flatten)]
hhvm_options: HhvmOptions,
#[clap(flatten)]
files: FileOpts,
/// Disable toplevel definition elaboration
#[clap(long)]
disable_toplevel_elaboration: bool,
/// Mutate the program as if we're in the debugger repl
#[clap(long)]
for_debugger_eval: bool,
#[clap(long, default_value("0"))]
emit_class_pointers: i32,
#[clap(long, default_value("0"))]
check_int_overflow: i32,
/// Number of parallel worker threads for subcommands that support parallelism,
/// otherwise ignored. If 0, use available parallelism, typically num-cpus.
#[clap(long, default_value("0"))]
num_threads: usize,
/// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc.
#[clap(long, default_value("32 MiB"))]
stack_size: Byte,
/// Instead of printing the unit, print a list of the decls requested during compilation.
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) log_decls_requested: bool,
/// Use serialized decl instead of decl pointer as the decl provider API
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) use_serialized_decls: bool,
/// Controls systemlib specific logic
#[clap(long)]
is_systemlib: bool,
}
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct FileOpts {
/// Input file(s)
filenames: Vec<PathBuf>,
/// Read a list of files (one-per-line) from this file
#[clap(long)]
input_file_list: Option<PathBuf>,
}
#[derive(Parser, Debug)]
enum Command {
/// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation.
Assemble(assemble::Opts),
/// Compile one Hack source file or a list of files to HHAS
Compile(compile::Opts),
/// Compile Hack source files or directories and produce a single CRC per
/// input file.
Crc(crc::Opts),
/// Print the source code with expression tree literals desugared.
/// Best effort debugging tool.
DesugarExprTrees(expr_trees::Opts),
/// Compute facts for a set of files.
Facts(facts::Opts),
/// Render the source text parse tree for each given file.
Parse(parse::Opts),
/// Parse many files whose filenames are read from stdin, discard parser output.
ParseBench(parse::BenchOpts),
/// Compile Hack source files or directories and check for compilation errors.
Verify(verify::Opts),
}
/// Which command are we running? Using bool opts for compatibility with test harnesses.
/// New commands should be defined as subcommands using the Command enum.
#[derive(Parser, Debug, Default)]
struct FlagCommands {
/// Parse decls from source text, transform them into facts, and print the facts
/// in JSON format.
#[clap(long)]
extract_facts_from_decls: bool,
/// Compile file with decls from the same file available during compilation.
#[clap(long)]
test_compile_with_decls: bool,
}
impl FileOpts {
pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> {
use std::io::BufReader;
let mut files: Vec<PathBuf> = Default::default();
if let Some(list_path) = self.input_file_list.take() {
for line in BufReader::new(std::fs::File::open(list_path)?).lines() {
files.push(Path::new(&line?).to_path_buf());
}
}
files.append(&mut self.filenames);
Ok(files)
}
pub fn is_batch_mode(&self) -> bool {
self.input_file_list.is_some() || self.filenames.len() > 1
}
}
impl Opts {
pub fn env_flags(&self) -> EnvFlags {
let mut flags = EnvFlags::empty();
if self.for_debugger_eval {
flags |= EnvFlags::FOR_DEBUGGER_EVAL;
}
if self.disable_toplevel_elaboration {
flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION;
}
if self.is_systemlib {
flags |= EnvFlags::IS_SYSTEMLIB;
}
flags
}
pub fn decl_opts(&self) -> DeclParserOptions {
// TODO: share this logic with hackc_create_decl_parse_options()
let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap();
let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() {
Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(),
None => Vec::new(),
};
DeclParserOptions {
auto_namespace_map,
disable_xhp_element_mangling: false,
interpret_soft_types_as_like_types: true,
allow_new_attribute_syntax: true,
enable_xhp_class_modifier: false,
php5_compat_mode: true,
hhvm_compat_mode: true,
..Default::default()
}
}
pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> {
let hhvm_options = &self.hhvm_options;
let hhvm_config = hhvm_options.to_config()?;
let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?;
let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?;
Ok(NativeEnv {
filepath: RelativePath::make(relative_path::Prefix::Dummy, path),
aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP,
include_roots: crate::Opts::INCLUDE_ROOTS,
hhbc_flags,
parser_flags,
flags: self.env_flags(),
emit_class_pointers: self.emit_class_pointers,
check_int_overflow: self.check_int_overflow,
})
}
// TODO (T118266805): get these from nearest.hhconfig enclosing each file.
pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{
"hhvm.aliased_namespaces": {
"global_value": {
"Async": "HH\\Lib\\Async",
"C": "FlibSL\\C",
"Dict": "FlibSL\\Dict",
"File": "HH\\Lib\\File",
"IO": "HH\\Lib\\IO",
"Keyset": "FlibSL\\Keyset",
"Locale": "FlibSL\\Locale",
"Math": "FlibSL\\Math",
"OS": "HH\\Lib\\OS",
"PHP": "FlibSL\\PHP",
"PseudoRandom": "FlibSL\\PseudoRandom",
"Regex": "FlibSL\\Regex",
"SecureRandom": "FlibSL\\SecureRandom",
"Str": "FlibSL\\Str",
"Vec": "FlibSL\\Vec"
}
}
}"#;
pub(crate) const INCLUDE_ROOTS: &'static str = "";
}
fn main() -> Result<()> {
env_logger::init();
let mut opts = Opts::parse();
// Some subcommands need worker threads with larger than default stacks,
// even when using Stacker. In particular, various derived traits (e.g. Drop)
// on AAST nodes are inherently recursive.
rayon::ThreadPoolBuilder::new()
.num_threads(opts.num_threads)
.stack_size(opts.stack_size.get_bytes().try_into()?)
.build_global()
.unwrap();
match opts.command.take() {
Some(Command::Assemble(opts)) => assemble::run(opts),
Some(Command::Crc(opts)) => crc::run(opts),
Some(Command::Parse(parse_opts)) => parse::run(parse_opts),
Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts),
Some(Command::Verify(opts)) => verify::run(opts),
// Expr trees
Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts),
// Facts
Some(Command::Facts(facts_opts)) => {
facts::extract_facts(&opts, facts_opts, &mut std::io::stdout())
}
None if opts.daemon && opts.flag_commands.extract_facts_from_decls => {
facts::daemon(&mut opts)
}
None if opts.flag_commands.extract_facts_from_decls => {
facts::run_flag(&mut opts, &mut std::io::stdout())
}
// Test Decls-in-Compilation
None if opts.daemon && opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile_daemon(&mut opts)
}
None if opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile(&mut opts, &mut std::io::stdout())
}
// Compile to hhas
Some(Command::Compile(mut opts)) => compile::run(&mut opts),
None if opts.daemon => compile::daemon(&mut opts),
None => compile::compile_from_text(&mut opts, &mut std::io::stdout()),
}
}
/// In daemon mode, hackc blocks waiting for a filename on stdin.
/// Then, using the originally invoked options, dispatches that file to be compiled.
fn daemon_loop(mut f: impl FnMut(PathBuf, &mut Vec<u8>) -> Result<()>) -> Result<()> |
#[cfg(test)]
mod tests {
use super::*;
/// Just make sure json parsing produces a proper list.
/// If the alias map length changes, keep this test in sync.
#[test]
fn test_auto_namespace_map() {
let dp_opts = Opts::default().decl_opts();
assert_eq!(dp_opts.auto_namespace_map.len(), 15);
}
}
| {
use std::io::Write;
for line in std::io::stdin().lock().lines() {
let mut buf = Vec::new();
f(Path::new(&line?).to_path_buf(), &mut buf)?;
// Account for utf-8 encoding and text streams with the python test runner:
// https://stackoverflow.com/questions/3586923/counting-unicode-characters-in-c
let mut w = std::io::stdout();
let num_chars = buf.iter().filter(|&b| (b & 0xc0) != 0x80).count() + 1;
writeln!(w, "{num_chars}")?;
w.write_all(&buf)?;
w.write_all(b"\n")?;
w.flush()?;
}
Ok(())
} | identifier_body |
hackc.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod assemble;
mod cmp_unit;
mod compile;
mod crc;
mod expr_trees;
mod facts;
mod parse;
mod util;
mod verify;
use ::compile::EnvFlags;
use ::compile::HHBCFlags;
use ::compile::NativeEnv;
use ::compile::ParserFlags;
use anyhow::Result;
use byte_unit::Byte;
use clap::Parser;
use hhvm_options::HhvmOptions;
use oxidized::decl_parser_options::DeclParserOptions;
use oxidized::relative_path::RelativePath;
use oxidized::relative_path::{self};
use std::io::BufRead;
use std::path::Path;
use std::path::PathBuf;
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct Opts {
#[clap(subcommand)]
command: Option<Command>,
/// Runs in daemon mode for testing purposes. Do not rely on for production
#[clap(long)]
daemon: bool,
#[clap(flatten)]
flag_commands: FlagCommands,
#[clap(flatten)]
hhvm_options: HhvmOptions,
#[clap(flatten)]
files: FileOpts,
/// Disable toplevel definition elaboration
#[clap(long)]
disable_toplevel_elaboration: bool,
/// Mutate the program as if we're in the debugger repl
#[clap(long)]
for_debugger_eval: bool,
#[clap(long, default_value("0"))]
emit_class_pointers: i32,
#[clap(long, default_value("0"))]
check_int_overflow: i32,
/// Number of parallel worker threads for subcommands that support parallelism,
/// otherwise ignored. If 0, use available parallelism, typically num-cpus.
#[clap(long, default_value("0"))]
num_threads: usize,
/// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc.
#[clap(long, default_value("32 MiB"))]
stack_size: Byte,
/// Instead of printing the unit, print a list of the decls requested during compilation.
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) log_decls_requested: bool,
/// Use serialized decl instead of decl pointer as the decl provider API
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) use_serialized_decls: bool,
/// Controls systemlib specific logic
#[clap(long)]
is_systemlib: bool,
}
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct | {
/// Input file(s)
filenames: Vec<PathBuf>,
/// Read a list of files (one-per-line) from this file
#[clap(long)]
input_file_list: Option<PathBuf>,
}
#[derive(Parser, Debug)]
enum Command {
/// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation.
Assemble(assemble::Opts),
/// Compile one Hack source file or a list of files to HHAS
Compile(compile::Opts),
/// Compile Hack source files or directories and produce a single CRC per
/// input file.
Crc(crc::Opts),
/// Print the source code with expression tree literals desugared.
/// Best effort debugging tool.
DesugarExprTrees(expr_trees::Opts),
/// Compute facts for a set of files.
Facts(facts::Opts),
/// Render the source text parse tree for each given file.
Parse(parse::Opts),
/// Parse many files whose filenames are read from stdin, discard parser output.
ParseBench(parse::BenchOpts),
/// Compile Hack source files or directories and check for compilation errors.
Verify(verify::Opts),
}
/// Which command are we running? Using bool opts for compatibility with test harnesses.
/// New commands should be defined as subcommands using the Command enum.
#[derive(Parser, Debug, Default)]
struct FlagCommands {
/// Parse decls from source text, transform them into facts, and print the facts
/// in JSON format.
#[clap(long)]
extract_facts_from_decls: bool,
/// Compile file with decls from the same file available during compilation.
#[clap(long)]
test_compile_with_decls: bool,
}
impl FileOpts {
pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> {
use std::io::BufReader;
let mut files: Vec<PathBuf> = Default::default();
if let Some(list_path) = self.input_file_list.take() {
for line in BufReader::new(std::fs::File::open(list_path)?).lines() {
files.push(Path::new(&line?).to_path_buf());
}
}
files.append(&mut self.filenames);
Ok(files)
}
pub fn is_batch_mode(&self) -> bool {
self.input_file_list.is_some() || self.filenames.len() > 1
}
}
impl Opts {
pub fn env_flags(&self) -> EnvFlags {
let mut flags = EnvFlags::empty();
if self.for_debugger_eval {
flags |= EnvFlags::FOR_DEBUGGER_EVAL;
}
if self.disable_toplevel_elaboration {
flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION;
}
if self.is_systemlib {
flags |= EnvFlags::IS_SYSTEMLIB;
}
flags
}
pub fn decl_opts(&self) -> DeclParserOptions {
// TODO: share this logic with hackc_create_decl_parse_options()
let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap();
let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() {
Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(),
None => Vec::new(),
};
DeclParserOptions {
auto_namespace_map,
disable_xhp_element_mangling: false,
interpret_soft_types_as_like_types: true,
allow_new_attribute_syntax: true,
enable_xhp_class_modifier: false,
php5_compat_mode: true,
hhvm_compat_mode: true,
..Default::default()
}
}
pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> {
let hhvm_options = &self.hhvm_options;
let hhvm_config = hhvm_options.to_config()?;
let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?;
let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?;
Ok(NativeEnv {
filepath: RelativePath::make(relative_path::Prefix::Dummy, path),
aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP,
include_roots: crate::Opts::INCLUDE_ROOTS,
hhbc_flags,
parser_flags,
flags: self.env_flags(),
emit_class_pointers: self.emit_class_pointers,
check_int_overflow: self.check_int_overflow,
})
}
// TODO (T118266805): get these from nearest.hhconfig enclosing each file.
pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{
"hhvm.aliased_namespaces": {
"global_value": {
"Async": "HH\\Lib\\Async",
"C": "FlibSL\\C",
"Dict": "FlibSL\\Dict",
"File": "HH\\Lib\\File",
"IO": "HH\\Lib\\IO",
"Keyset": "FlibSL\\Keyset",
"Locale": "FlibSL\\Locale",
"Math": "FlibSL\\Math",
"OS": "HH\\Lib\\OS",
"PHP": "FlibSL\\PHP",
"PseudoRandom": "FlibSL\\PseudoRandom",
"Regex": "FlibSL\\Regex",
"SecureRandom": "FlibSL\\SecureRandom",
"Str": "FlibSL\\Str",
"Vec": "FlibSL\\Vec"
}
}
}"#;
pub(crate) const INCLUDE_ROOTS: &'static str = "";
}
fn main() -> Result<()> {
env_logger::init();
let mut opts = Opts::parse();
// Some subcommands need worker threads with larger than default stacks,
// even when using Stacker. In particular, various derived traits (e.g. Drop)
// on AAST nodes are inherently recursive.
rayon::ThreadPoolBuilder::new()
.num_threads(opts.num_threads)
.stack_size(opts.stack_size.get_bytes().try_into()?)
.build_global()
.unwrap();
match opts.command.take() {
Some(Command::Assemble(opts)) => assemble::run(opts),
Some(Command::Crc(opts)) => crc::run(opts),
Some(Command::Parse(parse_opts)) => parse::run(parse_opts),
Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts),
Some(Command::Verify(opts)) => verify::run(opts),
// Expr trees
Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts),
// Facts
Some(Command::Facts(facts_opts)) => {
facts::extract_facts(&opts, facts_opts, &mut std::io::stdout())
}
None if opts.daemon && opts.flag_commands.extract_facts_from_decls => {
facts::daemon(&mut opts)
}
None if opts.flag_commands.extract_facts_from_decls => {
facts::run_flag(&mut opts, &mut std::io::stdout())
}
// Test Decls-in-Compilation
None if opts.daemon && opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile_daemon(&mut opts)
}
None if opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile(&mut opts, &mut std::io::stdout())
}
// Compile to hhas
Some(Command::Compile(mut opts)) => compile::run(&mut opts),
None if opts.daemon => compile::daemon(&mut opts),
None => compile::compile_from_text(&mut opts, &mut std::io::stdout()),
}
}
/// In daemon mode, hackc blocks waiting for a filename on stdin.
/// Then, using the originally invoked options, dispatches that file to be compiled.
fn daemon_loop(mut f: impl FnMut(PathBuf, &mut Vec<u8>) -> Result<()>) -> Result<()> {
use std::io::Write;
for line in std::io::stdin().lock().lines() {
let mut buf = Vec::new();
f(Path::new(&line?).to_path_buf(), &mut buf)?;
// Account for utf-8 encoding and text streams with the python test runner:
// https://stackoverflow.com/questions/3586923/counting-unicode-characters-in-c
let mut w = std::io::stdout();
let num_chars = buf.iter().filter(|&b| (b & 0xc0)!= 0x80).count() + 1;
writeln!(w, "{num_chars}")?;
w.write_all(&buf)?;
w.write_all(b"\n")?;
w.flush()?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// Just make sure json parsing produces a proper list.
/// If the alias map length changes, keep this test in sync.
#[test]
fn test_auto_namespace_map() {
let dp_opts = Opts::default().decl_opts();
assert_eq!(dp_opts.auto_namespace_map.len(), 15);
}
}
| FileOpts | identifier_name |
hackc.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod assemble;
mod cmp_unit;
mod compile;
mod crc;
mod expr_trees;
mod facts;
mod parse;
mod util;
mod verify;
use ::compile::EnvFlags;
use ::compile::HHBCFlags;
use ::compile::NativeEnv;
use ::compile::ParserFlags;
use anyhow::Result;
use byte_unit::Byte;
use clap::Parser;
use hhvm_options::HhvmOptions;
use oxidized::decl_parser_options::DeclParserOptions;
use oxidized::relative_path::RelativePath;
use oxidized::relative_path::{self};
use std::io::BufRead;
use std::path::Path;
use std::path::PathBuf;
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct Opts {
#[clap(subcommand)]
command: Option<Command>,
/// Runs in daemon mode for testing purposes. Do not rely on for production
#[clap(long)]
daemon: bool,
#[clap(flatten)]
flag_commands: FlagCommands,
#[clap(flatten)]
hhvm_options: HhvmOptions,
#[clap(flatten)]
files: FileOpts,
/// Disable toplevel definition elaboration
#[clap(long)]
disable_toplevel_elaboration: bool,
/// Mutate the program as if we're in the debugger repl
#[clap(long)]
for_debugger_eval: bool,
#[clap(long, default_value("0"))]
emit_class_pointers: i32,
#[clap(long, default_value("0"))]
check_int_overflow: i32,
/// Number of parallel worker threads for subcommands that support parallelism,
/// otherwise ignored. If 0, use available parallelism, typically num-cpus.
#[clap(long, default_value("0"))]
num_threads: usize,
/// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc.
#[clap(long, default_value("32 MiB"))]
stack_size: Byte,
/// Instead of printing the unit, print a list of the decls requested during compilation.
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) log_decls_requested: bool,
/// Use serialized decl instead of decl pointer as the decl provider API
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) use_serialized_decls: bool,
/// Controls systemlib specific logic
#[clap(long)]
is_systemlib: bool,
}
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct FileOpts {
/// Input file(s)
filenames: Vec<PathBuf>,
/// Read a list of files (one-per-line) from this file
#[clap(long)]
input_file_list: Option<PathBuf>,
}
#[derive(Parser, Debug)]
enum Command {
/// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation.
Assemble(assemble::Opts),
/// Compile one Hack source file or a list of files to HHAS
Compile(compile::Opts),
/// Compile Hack source files or directories and produce a single CRC per
/// input file.
Crc(crc::Opts),
/// Print the source code with expression tree literals desugared.
/// Best effort debugging tool.
DesugarExprTrees(expr_trees::Opts),
/// Compute facts for a set of files.
Facts(facts::Opts),
/// Render the source text parse tree for each given file.
Parse(parse::Opts),
/// Parse many files whose filenames are read from stdin, discard parser output.
ParseBench(parse::BenchOpts),
/// Compile Hack source files or directories and check for compilation errors.
Verify(verify::Opts),
}
/// Which command are we running? Using bool opts for compatibility with test harnesses.
/// New commands should be defined as subcommands using the Command enum.
#[derive(Parser, Debug, Default)]
struct FlagCommands {
/// Parse decls from source text, transform them into facts, and print the facts
/// in JSON format.
#[clap(long)]
extract_facts_from_decls: bool,
/// Compile file with decls from the same file available during compilation.
#[clap(long)]
test_compile_with_decls: bool,
}
impl FileOpts {
pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> {
use std::io::BufReader;
let mut files: Vec<PathBuf> = Default::default();
if let Some(list_path) = self.input_file_list.take() {
for line in BufReader::new(std::fs::File::open(list_path)?).lines() {
files.push(Path::new(&line?).to_path_buf());
}
}
files.append(&mut self.filenames);
Ok(files)
}
pub fn is_batch_mode(&self) -> bool {
self.input_file_list.is_some() || self.filenames.len() > 1
}
}
impl Opts {
pub fn env_flags(&self) -> EnvFlags {
let mut flags = EnvFlags::empty();
if self.for_debugger_eval {
flags |= EnvFlags::FOR_DEBUGGER_EVAL;
}
if self.disable_toplevel_elaboration {
flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION;
}
if self.is_systemlib {
flags |= EnvFlags::IS_SYSTEMLIB;
}
flags
}
pub fn decl_opts(&self) -> DeclParserOptions {
// TODO: share this logic with hackc_create_decl_parse_options()
let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap();
let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() {
Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(),
None => Vec::new(),
};
DeclParserOptions {
auto_namespace_map,
disable_xhp_element_mangling: false,
interpret_soft_types_as_like_types: true,
allow_new_attribute_syntax: true,
enable_xhp_class_modifier: false,
php5_compat_mode: true,
hhvm_compat_mode: true,
..Default::default()
}
}
pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> {
let hhvm_options = &self.hhvm_options;
let hhvm_config = hhvm_options.to_config()?;
let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?;
let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?;
Ok(NativeEnv {
filepath: RelativePath::make(relative_path::Prefix::Dummy, path),
aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP,
include_roots: crate::Opts::INCLUDE_ROOTS,
hhbc_flags,
parser_flags,
flags: self.env_flags(),
emit_class_pointers: self.emit_class_pointers,
check_int_overflow: self.check_int_overflow,
})
}
// TODO (T118266805): get these from nearest.hhconfig enclosing each file.
pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{
"hhvm.aliased_namespaces": {
"global_value": {
"Async": "HH\\Lib\\Async",
"C": "FlibSL\\C",
"Dict": "FlibSL\\Dict",
"File": "HH\\Lib\\File",
"IO": "HH\\Lib\\IO",
"Keyset": "FlibSL\\Keyset",
"Locale": "FlibSL\\Locale",
"Math": "FlibSL\\Math",
"OS": "HH\\Lib\\OS",
"PHP": "FlibSL\\PHP",
"PseudoRandom": "FlibSL\\PseudoRandom",
"Regex": "FlibSL\\Regex",
"SecureRandom": "FlibSL\\SecureRandom",
"Str": "FlibSL\\Str",
"Vec": "FlibSL\\Vec"
}
}
}"#;
pub(crate) const INCLUDE_ROOTS: &'static str = "";
}
fn main() -> Result<()> {
env_logger::init();
let mut opts = Opts::parse();
// Some subcommands need worker threads with larger than default stacks,
// even when using Stacker. In particular, various derived traits (e.g. Drop)
// on AAST nodes are inherently recursive.
rayon::ThreadPoolBuilder::new()
.num_threads(opts.num_threads)
.stack_size(opts.stack_size.get_bytes().try_into()?)
.build_global()
.unwrap();
match opts.command.take() {
Some(Command::Assemble(opts)) => assemble::run(opts),
Some(Command::Crc(opts)) => crc::run(opts),
Some(Command::Parse(parse_opts)) => parse::run(parse_opts),
Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts),
Some(Command::Verify(opts)) => verify::run(opts),
// Expr trees
Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts),
// Facts
Some(Command::Facts(facts_opts)) => {
facts::extract_facts(&opts, facts_opts, &mut std::io::stdout())
}
None if opts.daemon && opts.flag_commands.extract_facts_from_decls => {
facts::daemon(&mut opts)
}
None if opts.flag_commands.extract_facts_from_decls => |
// Test Decls-in-Compilation
None if opts.daemon && opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile_daemon(&mut opts)
}
None if opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile(&mut opts, &mut std::io::stdout())
}
// Compile to hhas
Some(Command::Compile(mut opts)) => compile::run(&mut opts),
None if opts.daemon => compile::daemon(&mut opts),
None => compile::compile_from_text(&mut opts, &mut std::io::stdout()),
}
}
/// In daemon mode, hackc blocks waiting for a filename on stdin.
/// Then, using the originally invoked options, dispatches that file to be compiled.
fn daemon_loop(mut f: impl FnMut(PathBuf, &mut Vec<u8>) -> Result<()>) -> Result<()> {
use std::io::Write;
for line in std::io::stdin().lock().lines() {
let mut buf = Vec::new();
f(Path::new(&line?).to_path_buf(), &mut buf)?;
// Account for utf-8 encoding and text streams with the python test runner:
// https://stackoverflow.com/questions/3586923/counting-unicode-characters-in-c
let mut w = std::io::stdout();
let num_chars = buf.iter().filter(|&b| (b & 0xc0)!= 0x80).count() + 1;
writeln!(w, "{num_chars}")?;
w.write_all(&buf)?;
w.write_all(b"\n")?;
w.flush()?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// Just make sure json parsing produces a proper list.
/// If the alias map length changes, keep this test in sync.
#[test]
fn test_auto_namespace_map() {
let dp_opts = Opts::default().decl_opts();
assert_eq!(dp_opts.auto_namespace_map.len(), 15);
}
}
| {
facts::run_flag(&mut opts, &mut std::io::stdout())
} | conditional_block |
hackc.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod assemble;
mod cmp_unit;
mod compile;
mod crc;
mod expr_trees;
mod facts;
mod parse;
mod util;
mod verify;
use ::compile::EnvFlags;
use ::compile::HHBCFlags;
use ::compile::NativeEnv;
use ::compile::ParserFlags;
use anyhow::Result;
use byte_unit::Byte;
use clap::Parser;
use hhvm_options::HhvmOptions;
use oxidized::decl_parser_options::DeclParserOptions;
use oxidized::relative_path::RelativePath;
use oxidized::relative_path::{self};
use std::io::BufRead;
use std::path::Path;
use std::path::PathBuf;
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct Opts {
#[clap(subcommand)]
command: Option<Command>,
/// Runs in daemon mode for testing purposes. Do not rely on for production
#[clap(long)]
daemon: bool,
#[clap(flatten)]
flag_commands: FlagCommands,
#[clap(flatten)]
hhvm_options: HhvmOptions,
#[clap(flatten)]
files: FileOpts,
/// Disable toplevel definition elaboration | for_debugger_eval: bool,
#[clap(long, default_value("0"))]
emit_class_pointers: i32,
#[clap(long, default_value("0"))]
check_int_overflow: i32,
/// Number of parallel worker threads for subcommands that support parallelism,
/// otherwise ignored. If 0, use available parallelism, typically num-cpus.
#[clap(long, default_value("0"))]
num_threads: usize,
/// Stack size to use for parallel worker threads. Supports unit suffixes like KB, MiB, etc.
#[clap(long, default_value("32 MiB"))]
stack_size: Byte,
/// Instead of printing the unit, print a list of the decls requested during compilation.
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) log_decls_requested: bool,
/// Use serialized decl instead of decl pointer as the decl provider API
/// (only used by --test-compile-with-decls)
#[clap(long)]
pub(crate) use_serialized_decls: bool,
/// Controls systemlib specific logic
#[clap(long)]
is_systemlib: bool,
}
/// Hack Compiler
#[derive(Parser, Debug, Default)]
struct FileOpts {
/// Input file(s)
filenames: Vec<PathBuf>,
/// Read a list of files (one-per-line) from this file
#[clap(long)]
input_file_list: Option<PathBuf>,
}
#[derive(Parser, Debug)]
enum Command {
/// Assemble HHAS file(s) into HackCUnit. Prints those HCUs' HHAS representation.
Assemble(assemble::Opts),
/// Compile one Hack source file or a list of files to HHAS
Compile(compile::Opts),
/// Compile Hack source files or directories and produce a single CRC per
/// input file.
Crc(crc::Opts),
/// Print the source code with expression tree literals desugared.
/// Best effort debugging tool.
DesugarExprTrees(expr_trees::Opts),
/// Compute facts for a set of files.
Facts(facts::Opts),
/// Render the source text parse tree for each given file.
Parse(parse::Opts),
/// Parse many files whose filenames are read from stdin, discard parser output.
ParseBench(parse::BenchOpts),
/// Compile Hack source files or directories and check for compilation errors.
Verify(verify::Opts),
}
/// Which command are we running? Using bool opts for compatibility with test harnesses.
/// New commands should be defined as subcommands using the Command enum.
#[derive(Parser, Debug, Default)]
struct FlagCommands {
/// Parse decls from source text, transform them into facts, and print the facts
/// in JSON format.
#[clap(long)]
extract_facts_from_decls: bool,
/// Compile file with decls from the same file available during compilation.
#[clap(long)]
test_compile_with_decls: bool,
}
impl FileOpts {
pub fn gather_input_files(&mut self) -> Result<Vec<PathBuf>> {
use std::io::BufReader;
let mut files: Vec<PathBuf> = Default::default();
if let Some(list_path) = self.input_file_list.take() {
for line in BufReader::new(std::fs::File::open(list_path)?).lines() {
files.push(Path::new(&line?).to_path_buf());
}
}
files.append(&mut self.filenames);
Ok(files)
}
pub fn is_batch_mode(&self) -> bool {
self.input_file_list.is_some() || self.filenames.len() > 1
}
}
impl Opts {
pub fn env_flags(&self) -> EnvFlags {
let mut flags = EnvFlags::empty();
if self.for_debugger_eval {
flags |= EnvFlags::FOR_DEBUGGER_EVAL;
}
if self.disable_toplevel_elaboration {
flags |= EnvFlags::DISABLE_TOPLEVEL_ELABORATION;
}
if self.is_systemlib {
flags |= EnvFlags::IS_SYSTEMLIB;
}
flags
}
pub fn decl_opts(&self) -> DeclParserOptions {
// TODO: share this logic with hackc_create_decl_parse_options()
let config_opts = options::Options::from_configs(&[Self::AUTO_NAMESPACE_MAP]).unwrap();
let auto_namespace_map = match config_opts.hhvm.aliased_namespaces.get().as_map() {
Some(m) => m.iter().map(|(k, v)| (k.clone(), v.clone())).collect(),
None => Vec::new(),
};
DeclParserOptions {
auto_namespace_map,
disable_xhp_element_mangling: false,
interpret_soft_types_as_like_types: true,
allow_new_attribute_syntax: true,
enable_xhp_class_modifier: false,
php5_compat_mode: true,
hhvm_compat_mode: true,
..Default::default()
}
}
pub fn native_env(&self, path: PathBuf) -> Result<NativeEnv<'_>> {
let hhvm_options = &self.hhvm_options;
let hhvm_config = hhvm_options.to_config()?;
let parser_flags = ParserFlags::from_hhvm_config(&hhvm_config)?;
let hhbc_flags = HHBCFlags::from_hhvm_config(&hhvm_config)?;
Ok(NativeEnv {
filepath: RelativePath::make(relative_path::Prefix::Dummy, path),
aliased_namespaces: crate::Opts::AUTO_NAMESPACE_MAP,
include_roots: crate::Opts::INCLUDE_ROOTS,
hhbc_flags,
parser_flags,
flags: self.env_flags(),
emit_class_pointers: self.emit_class_pointers,
check_int_overflow: self.check_int_overflow,
})
}
// TODO (T118266805): get these from nearest.hhconfig enclosing each file.
pub(crate) const AUTO_NAMESPACE_MAP: &'static str = r#"{
"hhvm.aliased_namespaces": {
"global_value": {
"Async": "HH\\Lib\\Async",
"C": "FlibSL\\C",
"Dict": "FlibSL\\Dict",
"File": "HH\\Lib\\File",
"IO": "HH\\Lib\\IO",
"Keyset": "FlibSL\\Keyset",
"Locale": "FlibSL\\Locale",
"Math": "FlibSL\\Math",
"OS": "HH\\Lib\\OS",
"PHP": "FlibSL\\PHP",
"PseudoRandom": "FlibSL\\PseudoRandom",
"Regex": "FlibSL\\Regex",
"SecureRandom": "FlibSL\\SecureRandom",
"Str": "FlibSL\\Str",
"Vec": "FlibSL\\Vec"
}
}
}"#;
pub(crate) const INCLUDE_ROOTS: &'static str = "";
}
fn main() -> Result<()> {
env_logger::init();
let mut opts = Opts::parse();
// Some subcommands need worker threads with larger than default stacks,
// even when using Stacker. In particular, various derived traits (e.g. Drop)
// on AAST nodes are inherently recursive.
rayon::ThreadPoolBuilder::new()
.num_threads(opts.num_threads)
.stack_size(opts.stack_size.get_bytes().try_into()?)
.build_global()
.unwrap();
match opts.command.take() {
Some(Command::Assemble(opts)) => assemble::run(opts),
Some(Command::Crc(opts)) => crc::run(opts),
Some(Command::Parse(parse_opts)) => parse::run(parse_opts),
Some(Command::ParseBench(bench_opts)) => parse::run_bench_command(bench_opts),
Some(Command::Verify(opts)) => verify::run(opts),
// Expr trees
Some(Command::DesugarExprTrees(et_opts)) => expr_trees::desugar_expr_trees(&opts, et_opts),
// Facts
Some(Command::Facts(facts_opts)) => {
facts::extract_facts(&opts, facts_opts, &mut std::io::stdout())
}
None if opts.daemon && opts.flag_commands.extract_facts_from_decls => {
facts::daemon(&mut opts)
}
None if opts.flag_commands.extract_facts_from_decls => {
facts::run_flag(&mut opts, &mut std::io::stdout())
}
// Test Decls-in-Compilation
None if opts.daemon && opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile_daemon(&mut opts)
}
None if opts.flag_commands.test_compile_with_decls => {
compile::test_decl_compile(&mut opts, &mut std::io::stdout())
}
// Compile to hhas
Some(Command::Compile(mut opts)) => compile::run(&mut opts),
None if opts.daemon => compile::daemon(&mut opts),
None => compile::compile_from_text(&mut opts, &mut std::io::stdout()),
}
}
/// In daemon mode, hackc blocks waiting for a filename on stdin.
/// Then, using the originally invoked options, dispatches that file to be compiled.
fn daemon_loop(mut f: impl FnMut(PathBuf, &mut Vec<u8>) -> Result<()>) -> Result<()> {
use std::io::Write;
for line in std::io::stdin().lock().lines() {
let mut buf = Vec::new();
f(Path::new(&line?).to_path_buf(), &mut buf)?;
// Account for utf-8 encoding and text streams with the python test runner:
// https://stackoverflow.com/questions/3586923/counting-unicode-characters-in-c
let mut w = std::io::stdout();
let num_chars = buf.iter().filter(|&b| (b & 0xc0)!= 0x80).count() + 1;
writeln!(w, "{num_chars}")?;
w.write_all(&buf)?;
w.write_all(b"\n")?;
w.flush()?;
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// Just make sure json parsing produces a proper list.
/// If the alias map length changes, keep this test in sync.
#[test]
fn test_auto_namespace_map() {
let dp_opts = Opts::default().decl_opts();
assert_eq!(dp_opts.auto_namespace_map.len(), 15);
}
} | #[clap(long)]
disable_toplevel_elaboration: bool,
/// Mutate the program as if we're in the debugger repl
#[clap(long)] | random_line_split |
fiber.rs | //! Сooperative multitasking module
//!
//! With the fiber module, you can:
//! - create, run and manage [fibers](struct.Fiber.html),
//! - use a synchronization mechanism for fibers, similar to “condition variables” and similar to operating-system
//! functions such as `pthread_cond_wait()` plus `pthread_cond_signal()`.
//!
//! See also:
//! - [Threads, fibers and yields](https://www.tarantool.io/en/doc/latest/book/box/atomic/#threads-fibers-and-yields)
//! - [Lua reference: Module fiber](https://www.tarantool.io/en/doc/latest/reference/reference_lua/fiber/)
//! - [C API reference: Module fiber](https://www.tarantool.io/en/doc/latest/dev_guide/reference_capi/fiber/)
use std::ffi::CString;
use std::marker::PhantomData;
use std::os::raw::c_void;
use std::time::Duration;
use va_list::VaList;
use crate::error::{Error, TarantoolError};
use crate::ffi::tarantool as ffi;
/// A fiber is a set of instructions which are executed with cooperative multitasking.
///
/// Fibers managed by the fiber module are associated with a user-supplied function called the fiber function.
///
/// A fiber has three possible states: **running**, **suspended** or **dead**.
/// When a fiber is started with [fiber.start()](struct.Fiber.html#method.start), it is **running**.
/// When a fiber is created with [Fiber::new()](struct.Fiber.html#method.new) (and has not been started yet) or yields control
/// with [sleep()](fn.sleep.html), it is **suspended**.
/// When a fiber ends (because the fiber function ends), it is **dead**.
///
/// A runaway fiber can be stopped with [fiber.cancel()](struct.Fiber.html#method.cancel).
/// However, [fiber.cancel()](struct.Fiber.html#method.cancel) is advisory — it works only if the runaway fiber calls
/// [is_cancelled()](fn.is_cancelled.html) occasionally. In practice, a runaway fiber can only become unresponsive if it
/// does many computations and does not check whether it has been cancelled.
///
/// The other potential problem comes from fibers which never get scheduled, because they are not subscribed to any events,
/// or because no relevant events occur. Such morphing fibers can be killed with [fiber.cancel()](struct.Fiber.html#method.cancel)
/// at any time, since [fiber.cancel()](struct.Fiber.html#method.cancel) sends an asynchronous wakeup event to the fiber,
/// and [is_cancelled()](fn.is_cancelled.html) is checked whenever such a wakeup event occurs.
///
/// Example:
/// ```rust
/// use tarantool::fiber::Fiber;
/// let mut fiber = Fiber::new("test_fiber", &mut |_| {
/// println!("I'm a fiber");
/// 0
/// });
/// fiber.start(());
/// println!("Fiber started")
/// ```
///
/// ```text
/// I'm a fiber
/// Fiber started
/// ```
pub struct Fiber<'a, T: 'a> {
inner: *mut ffi::Fiber,
callback: *mut c_void,
phantom: PhantomData<&'a T>,
}
impl<'a, T> Fiber<'a, T> {
/// Create a new fiber.
///
/// Takes a fiber from fiber cache, if it's not empty. Can fail only if there is not enough memory for
/// the fiber structure or fiber stack.
///
/// The created fiber automatically returns itself to the fiber cache when its `main` function
/// completes. The initial fiber state is **suspended**.
///
/// Ordinarily [Fiber::new()](#method.new) is used in conjunction with [fiber.set_joinable()](#method.set_joinable)
/// and [fiber.join()](#method.join)
///
/// - `name` - string with fiber name
/// - `callback` - function for run inside fiber
///
/// See also: [fiber.start()](#method.start)
pub fn new<F>(name: &str, callback: &mut F) -> Self
where
F: FnMut(Box<T>) -> i32,
{
let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
Self {
inner: unsafe { ffi::fiber_new(CString::new(name).unwrap().into_raw(), trampoline) },
callback: callback_ptr,
phantom: PhantomData,
}
}
/// Create a new fiber with defined attributes.
///
/// Can fail only if there is not enough memory for the fiber structure or fiber stack.
///
/// The created fiber automatically returns itself to the fiber cache if has default stack size
/// when its `main` function completes. The initial fiber state is **suspended**.
///
/// - `name` - string with fiber name
/// - `fiber_attr` - fiber attributes
/// - `callback` - function for run inside fiber
///
/// See also: [fiber.start()](#method.start)
pub fn new_with_attr<F>(name: &str, attr: &FiberAttr, callback: &mut F) -> Self
where
F: FnMut(Box<T>) -> i32,
{
let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
Self {
inner: unsafe {
ffi::fiber_new_ex(
CString::new(name).unwrap().into_raw(),
attr.inner,
trampoline,
)
},
callback: callback_ptr,
phantom: PhantomData,
}
}
/// Start execution of created fiber.
///
/// - `arg` - argument to start the fiber with
///
/// See also: [fiber.new()](#method.new)
pub fn start(&mut self, arg: T) {
unsafe {
ffi::fiber_start(self.inner, self.callback, Box::into_raw(Box::<T>::new(arg)));
}
}
/// Interrupt a synchronous wait of a fiber.
pub fn wakeup(&self) {
unsafe { ffi::fiber_wakeup(self.inner) }
}
/// Wait until the fiber is dead and then move its execution status to the caller.
///
/// “Join” a joinable fiber. That is, let the fiber’s function run and wait until the fiber’s status is **dead**
/// (normally a status becomes **dead** when the function execution finishes). Joining will cause a yield,
/// therefore, if the fiber is currently in a **suspended** state, execution of its fiber function will resume.
///
/// This kind of waiting is more convenient than going into a loop and periodically checking the status;
/// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with
/// [fiber.set_joinable()](#method.set_joinable).
///
/// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)).
///
/// Return: fiber function return code
pub fn join(&self) -> i32 {
unsafe { ffi::fiber_join(self.inner) }
}
/// Set fiber to be joinable (false by default).
///
/// - `is_joinable` - status to set
pub fn set_joinable(&mut self, is_joinable: bool) {
unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) }
}
/// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag)
///
/// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will
/// cause error: the fiber is dead. But a dead fiber can still report its id and status.
/// Possible errors: cancel is not permitted for the specified fiber object.
///
/// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely).
/// Then current fiber yields until the target fiber is dead (or is woken up by
/// [fiber.wakeup()](#method.wakeup)).
pub fn cancel(&mut self) {
unsafe { ffi::fiber_cancel(self.inner) }
}
}
/// Make it possible or not possible to wakeup the current
/// fiber immediately when it's cancelled.
///
/// - `is_cancellable` - status to set
///
/// Returns previous state.
pub fn set_cancellable(is_cancellable: bool) -> bool {
unsafe { ffi::fiber_set_cancellable(is_cancellable) }
}
/// Check current fiber for cancellation (it must be checked manually).
pub fn is_cancelled() -> bool {
unsafe { ffi::fiber_is_cancelled() }
}
/// Put the current fiber to sleep for at least `time` seconds.
///
/// Yield control to the scheduler and sleep for the specified number of seconds.
/// Only the current fiber can be made to sleep.
///
/// - `time` - time to sleep
///
/// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html))
pub fn sleep(time: f64) {
unsafe { ffi::fiber_sleep(time) }
}
/// Report loop begin time as double (cheap).
pub fn time() -> f64 {
unsafe { ffi::fiber_time() }
}
/// Report loop begin time as 64-bit int.
pub fn time64() -> u64 {
unsafe { ffi::fiber_time64() }
}
/// Report loop begin time as double (cheap). Uses monotonic clock.
pub fn clock() -> f64 {
unsafe { ffi::fiber_clock() }
}
/// Report loop begin time as 64-bit int. Uses monotonic clock.
pub fn clock64() -> u64 {
unsafe { ffi::fiber_clock64() }
}
/// Yield control to the scheduler.
///
/// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`.
///
/// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup)
pub fn fiber_yield() {
unsafe { ffi::fiber_yield() }
}
/// Reschedule fiber to end of event loop cycle.
pub fn reschedule() {
unsafe { ffi::fiber_reschedule() }
}
/// Fiber attributes container
pub struct FiberAttr {
inner: *mut ffi::FiberAttr,
}
impl FiberAttr {
/// Create a new fiber attribute container and initialize it with default parameters.
/// Can be used for many fibers creation, corresponding fibers will not take ownership.
///
/// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist.
pub fn new() -> Self {
FiberAttr {
inner: unsafe { ffi::fiber_attr_new() },
}
}
/// Get stack size from the fiber attribute.
///
/// Returns: stack size
pub fn stack_size(&self) -> usize {
unsafe { ffi::fiber_attr_getstacksize(self.inner) }
}
///Set stack size for the fiber attribute.
///
/// - `stack_size` - stack size for new fibers
pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> {
if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 {
Err(TarantoolError::last().into())
} else {
Ok(())
}
}
}
impl Drop for FiberAttr {
fn drop(&mut self) {
unsafe { ffi::fiber_attr_delete(self.inner) }
}
}
/// Conditional variable for cooperative multitasking (fibers).
///
/// A cond (short for "condition variable") is a synchronization primitive
/// that allow fibers to yield until some predicate is satisfied. Fiber
/// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait)
/// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called.
///
/// Example:
///
/// ```rust
/// use tarantool::fiber::Cond;
/// let cond = fiber.cond();
/// cond.wait();
/// ```
///
/// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes.
///
/// ```rust
/// // Call from another fiber:
/// cond.signal();
/// ```
///
/// The waiting stopped, and the [cond.wait()](#method.wait) function returned true.
///
/// This example depended on the use of a global conditional variable with the arbitrary name cond.
/// In real life, programmers would make sure to use different conditional variable names for different applications.
///
/// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping.
pub struct Cond {
inner: *mut ffi::FiberCond,
}
/// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section.
/// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable.
/// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait).
/// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait).
impl Cond {
/// Instantiate a new fiber cond object.
pub fn new() -> Self {
Cond {
inner: unsafe { ffi::fiber_cond_new() },
}
}
/// Wake one fiber waiting for the cond.
/// Does nothing if no one is waiting. Does not yield.
pub fn signal(&self) {
unsafe { ffi::fiber_cond_signal(self.inner) }
}
/// Wake up all fibers waiting for the cond.
/// Does not yield.
pub fn broadcast(&self) {
unsafe { ffi::fiber_cond_broadcast(self.inner) }
}
/// Suspend the execution of the current fiber (i.e. yield) until [signal()](#method.signal) is called.
///
/// Like pthread_cond, FiberCond can issue spurious wake ups caused by explicit
/// [Fiber::wakeup()](struct.Fiber.html#method.wakeup) or [Fiber::cancel()](struct.Fiber.html#method.cancel)
/// calls. It is highly recommended to wrap calls to this function into a loop
/// and check an actual predicate and `fiber_testcancel()` on every iteration.
///
/// - `timeout` - timeout in seconds
///
/// Returns:
/// - `true` on [signal()](#method.signal) call or a spurious wake up.
/// - `false` on timeout, diag is set to `TimedOut`
pub fn wait_timeout(&self, timeout: Duration) -> bool {
!(unsafe { ffi::fiber_cond_wait_timeout(self.inner, timeout.as_secs_f64()) } < 0)
}
/// Shortcut for [wait_timeout()](#method.wait_timeout).
pub fn wait(&self) -> bool {
!(unsafe { ffi::fiber_cond_wait(self.inner) } < 0)
}
}
impl Drop for Cond {
fn drop(&mut self) {
unsafe { ffi::fiber_cond_delete(self.inner) }
}
}
/// A lock for cooperative multitasking environment
pub struct Latch {
inner: *mut ffi::Latch,
}
impl Latch {
/// Allocate and initialize the new latch.
pub fn new() -> Self {
Latch {
inner: unsafe { ffi::box_latch_new() },
}
}
/// Lock a latch. Waits indefinitely until the current fiber can gain access to the latch.
pub fn lock(&self) -> LatchGuard {
unsafe { ffi::box_latch_lock(self.inner) };
LatchGuard {
latch_inner: self.inner,
}
}
/// Try to lock a latch. Return immediately if the latch is locked.
///
/// Returns:
/// - `Some` - success | pub fn try_lock(&self) -> Option<LatchGuard> {
if unsafe { ffi::box_latch_trylock(self.inner) } == 0 {
Some(LatchGuard {
latch_inner: self.inner,
})
} else {
None
}
}
}
impl Drop for Latch {
fn drop(&mut self) {
unsafe { ffi::box_latch_delete(self.inner) }
}
}
/// An RAII implementation of a "scoped lock" of a latch. When this structure is dropped (falls out of scope),
/// the lock will be unlocked.
pub struct LatchGuard {
latch_inner: *mut ffi::Latch,
}
impl Drop for LatchGuard {
fn drop(&mut self) {
unsafe { ffi::box_latch_unlock(self.latch_inner) }
}
}
pub(crate) unsafe fn unpack_callback<F, T>(callback: &mut F) -> (*mut c_void, ffi::FiberFunc)
where
F: FnMut(Box<T>) -> i32,
{
unsafe extern "C" fn trampoline<F, T>(mut args: VaList) -> i32
where
F: FnMut(Box<T>) -> i32,
{
let closure: &mut F = &mut *(args.get::<*const c_void>() as *mut F);
let arg = Box::from_raw(args.get::<*const c_void>() as *mut T);
(*closure)(arg)
}
(callback as *mut F as *mut c_void, Some(trampoline::<F, T>))
} | /// - `None` - the latch is locked. | random_line_split |
fiber.rs | //! Сooperative multitasking module
//!
//! With the fiber module, you can:
//! - create, run and manage [fibers](struct.Fiber.html),
//! - use a synchronization mechanism for fibers, similar to “condition variables” and similar to operating-system
//! functions such as `pthread_cond_wait()` plus `pthread_cond_signal()`.
//!
//! See also:
//! - [Threads, fibers and yields](https://www.tarantool.io/en/doc/latest/book/box/atomic/#threads-fibers-and-yields)
//! - [Lua reference: Module fiber](https://www.tarantool.io/en/doc/latest/reference/reference_lua/fiber/)
//! - [C API reference: Module fiber](https://www.tarantool.io/en/doc/latest/dev_guide/reference_capi/fiber/)
use std::ffi::CString;
use std::marker::PhantomData;
use std::os::raw::c_void;
use std::time::Duration;
use va_list::VaList;
use crate::error::{Error, TarantoolError};
use crate::ffi::tarantool as ffi;
/// A fiber is a set of instructions which are executed with cooperative multitasking.
///
/// Fibers managed by the fiber module are associated with a user-supplied function called the fiber function.
///
/// A fiber has three possible states: **running**, **suspended** or **dead**.
/// When a fiber is started with [fiber.start()](struct.Fiber.html#method.start), it is **running**.
/// When a fiber is created with [Fiber::new()](struct.Fiber.html#method.new) (and has not been started yet) or yields control
/// with [sleep()](fn.sleep.html), it is **suspended**.
/// When a fiber ends (because the fiber function ends), it is **dead**.
///
/// A runaway fiber can be stopped with [fiber.cancel()](struct.Fiber.html#method.cancel).
/// However, [fiber.cancel()](struct.Fiber.html#method.cancel) is advisory — it works only if the runaway fiber calls
/// [is_cancelled()](fn.is_cancelled.html) occasionally. In practice, a runaway fiber can only become unresponsive if it
/// does many computations and does not check whether it has been cancelled.
///
/// The other potential problem comes from fibers which never get scheduled, because they are not subscribed to any events,
/// or because no relevant events occur. Such morphing fibers can be killed with [fiber.cancel()](struct.Fiber.html#method.cancel)
/// at any time, since [fiber.cancel()](struct.Fiber.html#method.cancel) sends an asynchronous wakeup event to the fiber,
/// and [is_cancelled()](fn.is_cancelled.html) is checked whenever such a wakeup event occurs.
///
/// Example:
/// ```rust
/// use tarantool::fiber::Fiber;
/// let mut fiber = Fiber::new("test_fiber", &mut |_| {
/// println!("I'm a fiber");
/// 0
/// });
/// fiber.start(());
/// println!("Fiber started")
/// ```
///
/// ```text
/// I'm a fiber
/// Fiber started
/// ```
pub struct Fiber<'a, T: 'a> {
inner: *mut ffi::Fiber,
callback: *mut c_void,
phantom: PhantomData<&'a T>,
}
impl<'a, T> Fiber<'a, T> {
/// Create a new fiber.
///
/// Takes a fiber from fiber cache, if it's not empty. Can fail only if there is not enough memory for
/// the fiber structure or fiber stack.
///
/// The created fiber automatically returns itself to the fiber cache when its `main` function
/// completes. The initial fiber state is **suspended**.
///
/// Ordinarily [Fiber::new()](#method.new) is used in conjunction with [fiber.set_joinable()](#method.set_joinable)
/// and [fiber.join()](#method.join)
///
/// - `name` - string with fiber name
/// - `callback` - function for run inside fiber
///
/// See also: [fiber.start()](#method.start)
pub fn new<F>(name: &str, callback: &mut F) -> Self
where
F: FnMut(Box<T>) -> i32,
{
let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
Self {
inner: unsafe { ffi::fiber_new(CString::new(name).unwrap().into_raw(), trampoline) },
callback: callback_ptr,
phantom: PhantomData,
}
}
/// Create a new fiber with defined attributes.
///
/// Can fail only if there is not enough memory for the fiber structure or fiber stack.
///
/// The created fiber automatically returns itself to the fiber cache if has default stack size
/// when its `main` function completes. The initial fiber state is **suspended**.
///
/// - `name` - string with fiber name
/// - `fiber_attr` - fiber attributes
/// - `callback` - function for run inside fiber
///
/// See also: [fiber.start()](#method.start)
pub fn new_with_attr<F>(name: &str, attr: &FiberAttr, callback: &mut F) -> Self
where
F: FnMut(Box<T>) -> i32,
{
let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
Self {
inner: unsafe {
ffi::fiber_new_ex(
CString::new(name).unwrap().into_raw(),
attr.inner,
trampoline,
)
},
callback: callback_ptr,
phantom: PhantomData,
}
}
/// Start execution of created fiber.
///
/// - `arg` - argument to start the fiber with
///
/// See also: [fiber.new()](#method.new)
pub fn start(&mut self, arg: T) {
unsafe {
ffi::fiber_start(self.inner, self.callback, Box::into_raw(Box::<T>::new(arg)));
}
}
/// Interrupt a synchronous wait of a fiber.
pub fn wakeup( | {
unsafe { ffi::fiber_wakeup(self.inner) }
}
/// Wait until the fiber is dead and then move its execution status to the caller.
///
/// “Join” a joinable fiber. That is, let the fiber’s function run and wait until the fiber’s status is **dead**
/// (normally a status becomes **dead** when the function execution finishes). Joining will cause a yield,
/// therefore, if the fiber is currently in a **suspended** state, execution of its fiber function will resume.
///
/// This kind of waiting is more convenient than going into a loop and periodically checking the status;
/// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with
/// [fiber.set_joinable()](#method.set_joinable).
///
/// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)).
///
/// Return: fiber function return code
pub fn join(&self) -> i32 {
unsafe { ffi::fiber_join(self.inner) }
}
/// Set fiber to be joinable (false by default).
///
/// - `is_joinable` - status to set
pub fn set_joinable(&mut self, is_joinable: bool) {
unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) }
}
/// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag)
///
/// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will
/// cause error: the fiber is dead. But a dead fiber can still report its id and status.
/// Possible errors: cancel is not permitted for the specified fiber object.
///
/// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely).
/// Then current fiber yields until the target fiber is dead (or is woken up by
/// [fiber.wakeup()](#method.wakeup)).
pub fn cancel(&mut self) {
unsafe { ffi::fiber_cancel(self.inner) }
}
}
/// Make it possible or not possible to wakeup the current
/// fiber immediately when it's cancelled.
///
/// - `is_cancellable` - status to set
///
/// Returns previous state.
pub fn set_cancellable(is_cancellable: bool) -> bool {
unsafe { ffi::fiber_set_cancellable(is_cancellable) }
}
/// Check current fiber for cancellation (it must be checked manually).
pub fn is_cancelled() -> bool {
unsafe { ffi::fiber_is_cancelled() }
}
/// Put the current fiber to sleep for at least `time` seconds.
///
/// Yield control to the scheduler and sleep for the specified number of seconds.
/// Only the current fiber can be made to sleep.
///
/// - `time` - time to sleep
///
/// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html))
pub fn sleep(time: f64) {
unsafe { ffi::fiber_sleep(time) }
}
/// Report loop begin time as double (cheap).
pub fn time() -> f64 {
unsafe { ffi::fiber_time() }
}
/// Report loop begin time as 64-bit int.
pub fn time64() -> u64 {
unsafe { ffi::fiber_time64() }
}
/// Report loop begin time as double (cheap). Uses monotonic clock.
pub fn clock() -> f64 {
unsafe { ffi::fiber_clock() }
}
/// Report loop begin time as 64-bit int. Uses monotonic clock.
pub fn clock64() -> u64 {
unsafe { ffi::fiber_clock64() }
}
/// Yield control to the scheduler.
///
/// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`.
///
/// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup)
pub fn fiber_yield() {
unsafe { ffi::fiber_yield() }
}
/// Reschedule fiber to end of event loop cycle.
pub fn reschedule() {
unsafe { ffi::fiber_reschedule() }
}
/// Fiber attributes container
pub struct FiberAttr {
inner: *mut ffi::FiberAttr,
}
impl FiberAttr {
/// Create a new fiber attribute container and initialize it with default parameters.
/// Can be used for many fibers creation, corresponding fibers will not take ownership.
///
/// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist.
pub fn new() -> Self {
FiberAttr {
inner: unsafe { ffi::fiber_attr_new() },
}
}
/// Get stack size from the fiber attribute.
///
/// Returns: stack size
pub fn stack_size(&self) -> usize {
unsafe { ffi::fiber_attr_getstacksize(self.inner) }
}
///Set stack size for the fiber attribute.
///
/// - `stack_size` - stack size for new fibers
pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> {
if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 {
Err(TarantoolError::last().into())
} else {
Ok(())
}
}
}
impl Drop for FiberAttr {
fn drop(&mut self) {
unsafe { ffi::fiber_attr_delete(self.inner) }
}
}
/// Conditional variable for cooperative multitasking (fibers).
///
/// A cond (short for "condition variable") is a synchronization primitive
/// that allow fibers to yield until some predicate is satisfied. Fiber
/// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait)
/// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called.
///
/// Example:
///
/// ```rust
/// use tarantool::fiber::Cond;
/// let cond = fiber.cond();
/// cond.wait();
/// ```
///
/// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes.
///
/// ```rust
/// // Call from another fiber:
/// cond.signal();
/// ```
///
/// The waiting stopped, and the [cond.wait()](#method.wait) function returned true.
///
/// This example depended on the use of a global conditional variable with the arbitrary name cond.
/// In real life, programmers would make sure to use different conditional variable names for different applications.
///
/// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping.
pub struct Cond {
inner: *mut ffi::FiberCond,
}
/// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section.
/// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable.
/// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait).
/// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait).
impl Cond {
/// Instantiate a new fiber cond object.
pub fn new() -> Self {
Cond {
inner: unsafe { ffi::fiber_cond_new() },
}
}
/// Wake one fiber waiting for the cond.
/// Does nothing if no one is waiting. Does not yield.
pub fn signal(&self) {
unsafe { ffi::fiber_cond_signal(self.inner) }
}
/// Wake up all fibers waiting for the cond.
/// Does not yield.
pub fn broadcast(&self) {
unsafe { ffi::fiber_cond_broadcast(self.inner) }
}
/// Suspend the execution of the current fiber (i.e. yield) until [signal()](#method.signal) is called.
///
/// Like pthread_cond, FiberCond can issue spurious wake ups caused by explicit
/// [Fiber::wakeup()](struct.Fiber.html#method.wakeup) or [Fiber::cancel()](struct.Fiber.html#method.cancel)
/// calls. It is highly recommended to wrap calls to this function into a loop
/// and check an actual predicate and `fiber_testcancel()` on every iteration.
///
/// - `timeout` - timeout in seconds
///
/// Returns:
/// - `true` on [signal()](#method.signal) call or a spurious wake up.
/// - `false` on timeout, diag is set to `TimedOut`
pub fn wait_timeout(&self, timeout: Duration) -> bool {
!(unsafe { ffi::fiber_cond_wait_timeout(self.inner, timeout.as_secs_f64()) } < 0)
}
/// Shortcut for [wait_timeout()](#method.wait_timeout).
pub fn wait(&self) -> bool {
!(unsafe { ffi::fiber_cond_wait(self.inner) } < 0)
}
}
impl Drop for Cond {
fn drop(&mut self) {
unsafe { ffi::fiber_cond_delete(self.inner) }
}
}
/// A lock for cooperative multitasking environment
pub struct Latch {
inner: *mut ffi::Latch,
}
impl Latch {
/// Allocate and initialize the new latch.
pub fn new() -> Self {
Latch {
inner: unsafe { ffi::box_latch_new() },
}
}
/// Lock a latch. Waits indefinitely until the current fiber can gain access to the latch.
pub fn lock(&self) -> LatchGuard {
unsafe { ffi::box_latch_lock(self.inner) };
LatchGuard {
latch_inner: self.inner,
}
}
/// Try to lock a latch. Return immediately if the latch is locked.
///
/// Returns:
/// - `Some` - success
/// - `None` - the latch is locked.
pub fn try_lock(&self) -> Option<LatchGuard> {
if unsafe { ffi::box_latch_trylock(self.inner) } == 0 {
Some(LatchGuard {
latch_inner: self.inner,
})
} else {
None
}
}
}
impl Drop for Latch {
fn drop(&mut self) {
unsafe { ffi::box_latch_delete(self.inner) }
}
}
/// An RAII implementation of a "scoped lock" of a latch. When this structure is dropped (falls out of scope),
/// the lock will be unlocked.
pub struct LatchGuard {
latch_inner: *mut ffi::Latch,
}
impl Drop for LatchGuard {
fn drop(&mut self) {
unsafe { ffi::box_latch_unlock(self.latch_inner) }
}
}
pub(crate) unsafe fn unpack_callback<F, T>(callback: &mut F) -> (*mut c_void, ffi::FiberFunc)
where
F: FnMut(Box<T>) -> i32,
{
unsafe extern "C" fn trampoline<F, T>(mut args: VaList) -> i32
where
F: FnMut(Box<T>) -> i32,
{
let closure: &mut F = &mut *(args.get::<*const c_void>() as *mut F);
let arg = Box::from_raw(args.get::<*const c_void>() as *mut T);
(*closure)(arg)
}
(callback as *mut F as *mut c_void, Some(trampoline::<F, T>))
}
| &self) | identifier_name |
fiber.rs | //! Сooperative multitasking module
//!
//! With the fiber module, you can:
//! - create, run and manage [fibers](struct.Fiber.html),
//! - use a synchronization mechanism for fibers, similar to “condition variables” and similar to operating-system
//! functions such as `pthread_cond_wait()` plus `pthread_cond_signal()`.
//!
//! See also:
//! - [Threads, fibers and yields](https://www.tarantool.io/en/doc/latest/book/box/atomic/#threads-fibers-and-yields)
//! - [Lua reference: Module fiber](https://www.tarantool.io/en/doc/latest/reference/reference_lua/fiber/)
//! - [C API reference: Module fiber](https://www.tarantool.io/en/doc/latest/dev_guide/reference_capi/fiber/)
use std::ffi::CString;
use std::marker::PhantomData;
use std::os::raw::c_void;
use std::time::Duration;
use va_list::VaList;
use crate::error::{Error, TarantoolError};
use crate::ffi::tarantool as ffi;
/// A fiber is a set of instructions which are executed with cooperative multitasking.
///
/// Fibers managed by the fiber module are associated with a user-supplied function called the fiber function.
///
/// A fiber has three possible states: **running**, **suspended** or **dead**.
/// When a fiber is started with [fiber.start()](struct.Fiber.html#method.start), it is **running**.
/// When a fiber is created with [Fiber::new()](struct.Fiber.html#method.new) (and has not been started yet) or yields control
/// with [sleep()](fn.sleep.html), it is **suspended**.
/// When a fiber ends (because the fiber function ends), it is **dead**.
///
/// A runaway fiber can be stopped with [fiber.cancel()](struct.Fiber.html#method.cancel).
/// However, [fiber.cancel()](struct.Fiber.html#method.cancel) is advisory — it works only if the runaway fiber calls
/// [is_cancelled()](fn.is_cancelled.html) occasionally. In practice, a runaway fiber can only become unresponsive if it
/// does many computations and does not check whether it has been cancelled.
///
/// The other potential problem comes from fibers which never get scheduled, because they are not subscribed to any events,
/// or because no relevant events occur. Such morphing fibers can be killed with [fiber.cancel()](struct.Fiber.html#method.cancel)
/// at any time, since [fiber.cancel()](struct.Fiber.html#method.cancel) sends an asynchronous wakeup event to the fiber,
/// and [is_cancelled()](fn.is_cancelled.html) is checked whenever such a wakeup event occurs.
///
/// Example:
/// ```rust
/// use tarantool::fiber::Fiber;
/// let mut fiber = Fiber::new("test_fiber", &mut |_| {
/// println!("I'm a fiber");
/// 0
/// });
/// fiber.start(());
/// println!("Fiber started")
/// ```
///
/// ```text
/// I'm a fiber
/// Fiber started
/// ```
pub struct Fiber<'a, T: 'a> {
    // Raw handle to the underlying Tarantool fiber object.
    inner: *mut ffi::Fiber,
    // Type-erased pointer to the user callback; forwarded to `ffi::fiber_start`.
    callback: *mut c_void,
    // Ties the wrapper's lifetime to the borrowed callback (`'a`) and records
    // the argument type `T` without storing a value of it.
    phantom: PhantomData<&'a T>,
}
impl<'a, T> Fiber<'a, T> {
    /// Create a new fiber.
    ///
    /// Takes a fiber from fiber cache, if it's not empty. Can fail only if there is not enough memory for
    /// the fiber structure or fiber stack.
    ///
    /// The created fiber automatically returns itself to the fiber cache when its `main` function
    /// completes. The initial fiber state is **suspended**.
    ///
    /// Ordinarily [Fiber::new()](#method.new) is used in conjunction with [fiber.set_joinable()](#method.set_joinable)
    /// and [fiber.join()](#method.join)
    ///
    /// - `name` - string with fiber name
    /// - `callback` - function for run inside fiber
    ///
    /// See also: [fiber.start()](#method.start)
    ///
    /// # Panics
    ///
    /// Panics if `name` contains an interior NUL byte.
    pub fn new<F>(name: &str, callback: &mut F) -> Self
    where
        F: FnMut(Box<T>) -> i32,
    {
        let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
        // Hold the CString for the duration of the FFI call instead of
        // leaking it: the previous `CString::new(..).unwrap().into_raw()`
        // never reclaimed the allocation, leaking once per fiber creation.
        // Tarantool copies the fiber name internally, so a borrowed pointer
        // is sufficient — NOTE(review): confirm against the bound
        // `fiber_new` version.
        let name = CString::new(name).unwrap();
        Self {
            inner: unsafe { ffi::fiber_new(name.as_ptr(), trampoline) },
            callback: callback_ptr,
            phantom: PhantomData,
        }
    }

    /// Create a new fiber with defined attributes.
    ///
    /// Can fail only if there is not enough memory for the fiber structure or fiber stack.
    ///
    /// The created fiber automatically returns itself to the fiber cache if has default stack size
    /// when its `main` function completes. The initial fiber state is **suspended**.
    ///
    /// - `name` - string with fiber name
    /// - `attr` - fiber attributes
    /// - `callback` - function for run inside fiber
    ///
    /// See also: [fiber.start()](#method.start)
    ///
    /// # Panics
    ///
    /// Panics if `name` contains an interior NUL byte.
    pub fn new_with_attr<F>(name: &str, attr: &FiberAttr, callback: &mut F) -> Self
    where
        F: FnMut(Box<T>) -> i32,
    {
        let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
        // Same leak fix as in `new`: keep the CString alive across the call.
        let name = CString::new(name).unwrap();
        Self {
            inner: unsafe { ffi::fiber_new_ex(name.as_ptr(), attr.inner, trampoline) },
            callback: callback_ptr,
            phantom: PhantomData,
        }
    }

    /// Start execution of created fiber.
    ///
    /// - `arg` - argument to start the fiber with
    ///
    /// See also: [fiber.new()](#method.new)
    pub fn start(&mut self, arg: T) {
        unsafe {
            // Ownership of `arg` is handed to the fiber; the trampoline in
            // `unpack_callback` reconstructs the `Box<T>` via `Box::from_raw`.
            ffi::fiber_start(self.inner, self.callback, Box::into_raw(Box::new(arg)));
        }
    }

    /// Interrupt a synchronous wait of a fiber.
    pub fn wakeup(&self) {
        unsafe { ffi::fiber_wakeup(self.inner) }
    }

    /// Wait until the fiber is dead and then move its execution status to the caller.
    ///
    /// “Join” a joinable fiber. That is, let the fiber’s function run and wait until the fiber’s status is **dead**
    /// (normally a status becomes **dead** when the function execution finishes). Joining will cause a yield,
    /// therefore, if the fiber is currently in a **suspended** state, execution of its fiber function will resume.
    ///
    /// This kind of waiting is more convenient than going into a loop and periodically checking the status;
    /// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with
    /// [fiber.set_joinable()](#method.set_joinable).
    ///
    /// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)).
    ///
    /// Return: fiber function return code
    pub fn join(&self) -> i32 {
        unsafe { ffi::fiber_join(self.inner) }
    }

    /// Set fiber to be joinable (false by default).
    ///
    /// - `is_joinable` - status to set
    pub fn set_joinable(&mut self, is_joinable: bool) {
        unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) }
    }

    /// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag)
    ///
    /// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will
    /// cause error: the fiber is dead. But a dead fiber can still report its id and status.
    /// Possible errors: cancel is not permitted for the specified fiber object.
    ///
    /// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely).
    /// Then current fiber yields until the target fiber is dead (or is woken up by
    /// [fiber.wakeup()](#method.wakeup)).
    pub fn cancel(&mut self) {
        unsafe { ffi::fiber_cancel(self.inner) }
    }
}
/// Make it possible or not possible to wakeup the current
/// fiber immediately when it's cancelled.
///
/// - `is_cancellable` - status to set
///
/// Returns previous state.
pub fn set_cancellable(is_cancellable: bool) -> bool {
    // SAFETY: FFI call taking a plain bool; no pointers involved.
    unsafe { ffi::fiber_set_cancellable(is_cancellable) }
}
/// Check current fiber for cancellation (it must be checked manually).
pub fn is_cancelled() -> bool {
    // SAFETY: no-argument FFI call.
    unsafe { ffi::fiber_is_cancelled() }
}
/// Put the current fiber to sleep for at least `time` seconds.
///
/// Yield control to the scheduler and sleep for the specified number of seconds.
/// Only the current fiber can be made to sleep.
///
/// - `time` - time to sleep
///
/// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html))
pub fn sleep(time: f64) {
    // SAFETY: FFI call taking a plain f64; yields control but takes no pointers.
    unsafe { ffi::fiber_sleep(time) }
}
/// Report loop begin time as double (cheap).
pub fn time() -> f64 {
    // SAFETY: no-argument FFI call.
    unsafe { ffi::fiber_time() }
}
/// Report loop begin time as 64-bit int.
pub fn time64() -> u64 {
    // SAFETY: no-argument FFI call.
    unsafe { ffi::fiber_time64() }
}
/// Report loop begin time as double (cheap). Uses monotonic clock.
pub fn clock() -> f64 {
    // SAFETY: no-argument FFI call.
    unsafe { ffi::fiber_clock() }
}
/// Report loop begin time as 64-bit int. Uses monotonic clock.
pub fn clock64() -> u64 {
    // SAFETY: no-argument FFI call.
    unsafe { ffi::fiber_clock64() }
}
/// Yield control to the scheduler.
///
/// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`.
///
/// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup)
pub fn fiber_yield() {
    // SAFETY: no-argument FFI call; suspends the current fiber.
    unsafe { ffi::fiber_yield() }
}
/// Reschedule fiber to end of event loop cycle.
pub fn reschedule() {
    // SAFETY: no-argument FFI call.
    unsafe { ffi::fiber_reschedule() }
}
/// Fiber attributes container
pub struct FiberAttr {
    // Raw attribute handle allocated by `ffi::fiber_attr_new`; freed in `Drop`.
    inner: *mut ffi::FiberAttr,
}
impl FiberAttr {
    /// Create a new fiber attribute container and initialize it with default parameters.
    /// Can be used for many fibers creation, corresponding fibers will not take ownership.
    ///
    /// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist.
    pub fn new() -> Self {
        let inner = unsafe { ffi::fiber_attr_new() };
        FiberAttr { inner }
    }

    /// Get stack size from the fiber attribute.
    ///
    /// Returns: stack size
    pub fn stack_size(&self) -> usize {
        unsafe { ffi::fiber_attr_getstacksize(self.inner) }
    }

    /// Set stack size for the fiber attribute.
    ///
    /// - `stack_size` - stack size for new fibers
    ///
    /// Returns an error (taken from the Tarantool diagnostics area) when the
    /// underlying call reports failure.
    pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> {
        let rc = unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) };
        if rc >= 0 {
            Ok(())
        } else {
            Err(TarantoolError::last().into())
        }
    }
}
impl Drop for FiberAttr {
    fn drop(&mut self) {
        // Frees only the attribute object itself; per the `new` docs, fibers
        // already created with this attribute remain valid.
        unsafe { ffi::fiber_attr_delete(self.inner) }
    }
}
/// Conditional variable for cooperative multitasking (fibers).
///
/// A cond (short for "condition variable") is a synchronization primitive
/// that allow fibers to yield until some predicate is satisfied. Fiber
/// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait)
/// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called.
///
/// Example:
///
/// ```rust
/// use tarantool::fiber::Cond;
/// let cond = fiber.cond();
/// cond.wait();
/// ```
///
/// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes.
///
/// ```rust
/// // Call from another fiber:
/// cond.signal();
/// ```
///
/// The waiting stopped, and the [cond.wait()](#method.wait) function returned true.
///
/// This example depended on the use of a global conditional variable with the arbitrary name cond.
/// In real life, programmers would make sure to use different conditional variable names for different applications.
///
/// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping.
pub struct Cond {
    // Raw cond handle allocated by `ffi::fiber_cond_new`; freed in `Drop`.
    inner: *mut ffi::FiberCond,
}
/// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section.
/// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable.
/// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait).
/// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait).
impl Cond {
    /// Instantiate a new fiber cond object.
    pub fn new() -> Self {
        let inner = unsafe { ffi::fiber_cond_new() };
        Cond { inner }
    }

    /// Wake one fiber waiting for the cond.
    /// Does nothing if no one is waiting. Does not yield.
    pub fn signal(&self) {
        unsafe { ffi::fiber_cond_signal(self.inner) }
    }

    /// Wake up all fibers waiting for the cond.
    /// Does not yield.
    pub fn broadcast(&self) {
        unsafe { ffi::fiber_cond_broadcast(self.inner) }
    }

    /// Suspend the execution of the current fiber (i.e. yield) until [signal()](#method.signal) is called.
    ///
    /// Like pthread_cond, FiberCond can issue spurious wake ups caused by explicit
    /// [Fiber::wakeup()](struct.Fiber.html#method.wakeup) or [Fiber::cancel()](struct.Fiber.html#method.cancel)
    /// calls. It is highly recommended to wrap calls to this function into a loop
    /// and check an actual predicate and `fiber_testcancel()` on every iteration.
    ///
    /// - `timeout` - timeout in seconds
    ///
    /// Returns:
    /// - `true` on [signal()](#method.signal) call or a spurious wake up.
    /// - `false` on timeout, diag is set to `TimedOut`
    pub fn wait_timeout(&self, timeout: Duration) -> bool {
        let rc = unsafe { ffi::fiber_cond_wait_timeout(self.inner, timeout.as_secs_f64()) };
        rc >= 0
    }

    /// Shortcut for [wait_timeout()](#method.wait_timeout) without a timeout:
    /// waits until woken, returning `false` only when the underlying call fails.
    pub fn wait(&self) -> bool {
        let rc = unsafe { ffi::fiber_cond_wait(self.inner) };
        rc >= 0
    }
}
impl Drop for Cond {
    fn drop(&mut self) {
        // NOTE(review): presumably no fiber may still be waiting on the cond
        // when it is deleted — confirm against the C API contract.
        unsafe { ffi::fiber_cond_delete(self.inner) }
    }
}
/// A lock for cooperative multitasking environment
pub struct Latch {
    // Raw latch handle allocated by `ffi::box_latch_new`; freed in `Drop`.
    inner: *mut ffi::Latch,
}
impl Latch {
/// Allocate and initialize the new latch.
pub fn new() -> Self {
Latch {
inner: unsafe { ffi::box_latch_new() },
}
}
/// Lock a latch. Waits indefinitely until the current fiber can gain access to the latch.
pub fn lock(&self) -> LatchGuard {
unsafe { ffi::box_latch_lock(self.inner) };
LatchGuard {
latch_inner: self.inner,
}
}
/// Try to lock a latch. Return immediately if the latch is locked.
///
/// Returns:
/// - `Some` - success
/// - `None` - the latch is locked.
pub fn try_lock(&self) -> Option<LatchGuard> {
if unsafe { ffi::box_latch_trylock(self.inner) } == 0 {
Some(LatchGuard {
latch_inner: self.inner,
})
} else {
Non | op for Latch {
fn drop(&mut self) {
unsafe { ffi::box_latch_delete(self.inner) }
}
}
/// An RAII implementation of a "scoped lock" of a latch. When this structure is dropped (falls out of scope),
/// the lock will be unlocked.
pub struct LatchGuard {
    // Pointer to the latch this guard unlocks on drop. NOTE(review): the
    // `Latch` must outlive the guard; nothing in the types enforces this.
    latch_inner: *mut ffi::Latch,
}
impl Drop for LatchGuard {
    fn drop(&mut self) {
        // Releases the latch acquired by `Latch::lock`/`Latch::try_lock`.
        unsafe { ffi::box_latch_unlock(self.latch_inner) }
    }
}
/// Packs a `FnMut(Box<T>) -> i32` closure into the type-erased
/// `(argument pointer, C trampoline)` pair consumed by `ffi::fiber_start`.
///
/// # Safety
///
/// The returned pointer borrows `callback`; the caller must keep the closure
/// alive and in place until the fiber that receives the pair has run it.
pub(crate) unsafe fn unpack_callback<F, T>(callback: &mut F) -> (*mut c_void, ffi::FiberFunc)
where
    F: FnMut(Box<T>) -> i32,
{
    unsafe extern "C" fn trampoline<F, T>(mut args: VaList) -> i32
    where
        F: FnMut(Box<T>) -> i32,
    {
        // First vararg: type-erased pointer to the closure itself.
        let closure: &mut F = &mut *(args.get::<*const c_void>() as *mut F);
        // Second vararg: heap pointer produced by `Box::into_raw` in
        // `Fiber::start`; re-boxing transfers ownership back to Rust so the
        // argument is dropped when the callback finishes with it.
        let arg = Box::from_raw(args.get::<*const c_void>() as *mut T);
        (*closure)(arg)
    }
    (callback as *mut F as *mut c_void, Some(trampoline::<F, T>))
}
| e
}
}
}
impl Dr | conditional_block |
fiber.rs | //! Сooperative multitasking module
//!
//! With the fiber module, you can:
//! - create, run and manage [fibers](struct.Fiber.html),
//! - use a synchronization mechanism for fibers, similar to “condition variables” and similar to operating-system
//! functions such as `pthread_cond_wait()` plus `pthread_cond_signal()`.
//!
//! See also:
//! - [Threads, fibers and yields](https://www.tarantool.io/en/doc/latest/book/box/atomic/#threads-fibers-and-yields)
//! - [Lua reference: Module fiber](https://www.tarantool.io/en/doc/latest/reference/reference_lua/fiber/)
//! - [C API reference: Module fiber](https://www.tarantool.io/en/doc/latest/dev_guide/reference_capi/fiber/)
use std::ffi::CString;
use std::marker::PhantomData;
use std::os::raw::c_void;
use std::time::Duration;
use va_list::VaList;
use crate::error::{Error, TarantoolError};
use crate::ffi::tarantool as ffi;
/// A fiber is a set of instructions which are executed with cooperative multitasking.
///
/// Fibers managed by the fiber module are associated with a user-supplied function called the fiber function.
///
/// A fiber has three possible states: **running**, **suspended** or **dead**.
/// When a fiber is started with [fiber.start()](struct.Fiber.html#method.start), it is **running**.
/// When a fiber is created with [Fiber::new()](struct.Fiber.html#method.new) (and has not been started yet) or yields control
/// with [sleep()](fn.sleep.html), it is **suspended**.
/// When a fiber ends (because the fiber function ends), it is **dead**.
///
/// A runaway fiber can be stopped with [fiber.cancel()](struct.Fiber.html#method.cancel).
/// However, [fiber.cancel()](struct.Fiber.html#method.cancel) is advisory — it works only if the runaway fiber calls
/// [is_cancelled()](fn.is_cancelled.html) occasionally. In practice, a runaway fiber can only become unresponsive if it
/// does many computations and does not check whether it has been cancelled.
///
/// The other potential problem comes from fibers which never get scheduled, because they are not subscribed to any events,
/// or because no relevant events occur. Such morphing fibers can be killed with [fiber.cancel()](struct.Fiber.html#method.cancel)
/// at any time, since [fiber.cancel()](struct.Fiber.html#method.cancel) sends an asynchronous wakeup event to the fiber,
/// and [is_cancelled()](fn.is_cancelled.html) is checked whenever such a wakeup event occurs.
///
/// Example:
/// ```rust
/// use tarantool::fiber::Fiber;
/// let mut fiber = Fiber::new("test_fiber", &mut |_| {
/// println!("I'm a fiber");
/// 0
/// });
/// fiber.start(());
/// println!("Fiber started")
/// ```
///
/// ```text
/// I'm a fiber
/// Fiber started
/// ```
pub struct Fiber<'a, T: 'a> {
inner: *mut ffi::Fiber,
callback: *mut c_void,
phantom: PhantomData<&'a T>,
}
impl<'a, T> Fiber<'a, T> {
/// Create a new fiber.
///
/// Takes a fiber from fiber cache, if it's not empty. Can fail only if there is not enough memory for
/// the fiber structure or fiber stack.
///
/// The created fiber automatically returns itself to the fiber cache when its `main` function
/// completes. The initial fiber state is **suspended**.
///
/// Ordinarily [Fiber::new()](#method.new) is used in conjunction with [fiber.set_joinable()](#method.set_joinable)
/// and [fiber.join()](#method.join)
///
/// - `name` - string with fiber name
/// - `callback` - function for run inside fiber
///
/// See also: [fiber.start()](#method.start)
pub fn new<F>(name: &str, callback: &mut F) -> Self
where
F: FnMut(Box<T>) -> i32,
{
let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
Self {
inner: unsafe { ffi::fiber_new(CString::new(name).unwrap().into_raw(), trampoline) },
callback: callback_ptr,
phantom: PhantomData,
}
}
/// Create a new fiber with defined attributes.
///
/// Can fail only if there is not enough memory for the fiber structure or fiber stack.
///
/// The created fiber automatically returns itself to the fiber cache if has default stack size
/// when its `main` function completes. The initial fiber state is **suspended**.
///
/// - `name` - string with fiber name
/// - `fiber_attr` - fiber attributes
/// - `callback` - function for run inside fiber
///
/// See also: [fiber.start()](#method.start)
pub fn new_with_attr<F>(name: &str, attr: &FiberAttr, callback: &mut F) -> Self
where
F: FnMut(Box<T>) -> i32,
{
let (callback_ptr, trampoline) = unsafe { unpack_callback(callback) };
Self {
inner: unsafe {
ffi::fiber_new_ex(
CString::new(name).unwrap().into_raw(),
attr.inner,
trampoline,
)
},
callback: callback_ptr,
phantom: PhantomData,
}
}
/// Start execution of created fiber.
///
/// - `arg` - argument to start the fiber with
///
/// See also: [fiber.new()](#method.new)
pub fn start(&mut self, arg: T) {
unsafe {
ffi::fiber_start(self.inner, self.callback, Box::into_raw(Box::<T>::new(arg)));
}
}
/// Interrupt a synchronous wait of a fiber.
pub fn wakeup(&self) {
unsafe { ffi::fiber_wakeup(self.inner) }
}
/// Wait until the fiber is dead and then move its execution status to the caller.
///
/// “Join” a joinable fiber. That is, let the fiber’s function run and wait until the fiber’s status is **dead**
/// (normally a status becomes **dead** when the function execution finishes). Joining will cause a yield,
/// therefore, if the fiber is currently in a **suspended** state, execution of its fiber function will resume.
///
/// This kind of waiting is more convenient than going into a loop and periodically checking the status;
/// however, it works only if the fiber was created with [fiber.new()](#method.new) and was made joinable with
/// [fiber.set_joinable()](#method.set_joinable).
///
/// The fiber must not be detached (See also: [fiber.set_joinable()](#method.set_joinable)).
///
/// Return: fiber function return code
pub fn join(&self) -> i32 {
unsafe { ffi::fiber_join(self.inner) }
}
/// Set fiber to be joinable (false by default).
///
/// - `is_joinable` - status to set
pub fn set_joinable(&mut self, is_joinable: bool) {
unsafe { ffi::fiber_set_joinable(self.inner, is_joinable) }
}
/// Cancel a fiber. (set `FIBER_IS_CANCELLED` flag)
///
/// Running and suspended fibers can be cancelled. After a fiber has been cancelled, attempts to operate on it will
/// cause error: the fiber is dead. But a dead fiber can still report its id and status.
/// Possible errors: cancel is not permitted for the specified fiber object.
///
/// If target fiber's flag `FIBER_IS_CANCELLABLE` set, then it would be woken up (maybe prematurely).
/// Then current fiber yields until the target fiber is dead (or is woken up by
/// [fiber.wakeup()](#method.wakeup)).
pub fn cancel(&mut self) {
unsafe { ffi::fiber_cancel(self.inner) }
}
}
/// Make it possible or not possible to wakeup the current
/// fiber immediately when it's cancelled.
///
/// - `is_cancellable` - status to set
///
/// Returns previous state.
pub fn set_cancellable(is_cancellable: bool) -> bool {
unsafe { ffi::fiber_set_cancellable(is_cancellable) }
}
/// Check current fiber for cancellation (it must be checked manually).
pub fn is_cancelled() -> bool {
unsafe { ffi::fiber_is_cancelled() }
}
/// Put the current fiber to sleep for at least `time` seconds.
///
/// Yield control to the scheduler and sleep for the specified number of seconds.
/// Only the current fiber can be made to sleep.
///
/// - `time` - time to sleep
///
/// > **Note:** this is a cancellation point (See also: [is_cancelled()](fn.is_cancelled.html))
pub fn sleep(time: f64) {
unsafe { ffi::fiber_sleep(time) }
}
/// Report loop begin time as double (cheap).
pub fn time() -> f64 {
unsafe { ffi::fiber_time() }
}
/// Report loop begin time as 64-bit int.
pub fn time64() -> u64 {
unsafe { ffi::fiber_time64() }
}
/// Report loop begin time as double (cheap). Uses monotonic clock.
pub fn clock() -> f64 {
unsafe { ffi::fiber_clock() }
}
/// Report loop begin time as 64-bit int. Uses monotonic clock.
pub fn clock64() -> u64 {
unsafe { ffi::fiber_clock64() }
}
/// Yield control to the scheduler.
///
/// Return control to another fiber and wait until it'll be woken. Equivalent to `fiber.sleep(0)`.
///
/// See also: [Fiber::wakeup()](struct.Fiber.html#method.wakeup)
pub fn fiber_yield() {
unsafe { ffi::fiber_yield() }
}
/// Reschedule fiber to end of event loop cycle.
pub fn reschedule() {
unsafe { ffi::fiber_reschedule() }
}
/// Fiber attributes container
pub struct FiberAttr {
inner: *mut ffi::FiberAttr,
}
impl FiberAttr {
/// Create a new fiber attribute container and initialize it with default parameters.
/// Can be used for many fibers creation, corresponding fibers will not take ownership.
///
/// This is safe to drop `FiberAttr` value when fibers created with this attribute still exist.
pub fn new() -> Self {
FiberAttr {
inner: unsafe { ffi::fiber_attr_new() },
}
}
/// Get stack size from the fiber attribute.
///
/// Returns: stack size
pub fn stack_size(&self) -> usize {
unsafe { ffi::fiber_attr_getstacksize(self.inner) }
}
///Set stack size for the fiber attribute.
///
/// - `stack_size` - stack size for new fibers
pub fn set_stack_size(&mut self, stack_size: usize) -> Result<(), Error> {
if unsafe { ffi::fiber_attr_setstacksize(self.inner, stack_size) } < 0 {
Err(TarantoolError::last().into())
} else {
Ok(())
}
}
}
impl Drop for FiberAttr {
fn drop(&mut self) {
unsafe { ffi::fiber_attr_delete(self.inner) }
}
}
/// Conditional variable for cooperative multitasking (fibers).
///
/// A cond (short for "condition variable") is a synchronization primitive
/// that allow fibers to yield until some predicate is satisfied. Fiber
/// conditions have two basic operations - `wait()` and `signal()`. [cond.wait()](#method.wait)
/// suspends execution of fiber (i.e. yields) until [cond.signal()](#method.signal) is called.
///
/// Example:
///
/// ```rust
/// use tarantool::fiber::Cond;
/// let cond = fiber.cond();
/// cond.wait();
/// ```
///
/// The job will hang because [cond.wait()](#method.wait) – will go to sleep until the condition variable changes.
///
/// ```rust
/// // Call from another fiber:
/// cond.signal();
/// ```
///
/// The waiting stopped, and the [cond.wait()](#method.wait) function returned true.
///
/// This example depended on the use of a global conditional variable with the arbitrary name cond.
/// In real life, programmers would make sure to use different conditional variable names for different applications.
///
/// Unlike `pthread_cond`, [Cond]() doesn't require mutex/latch wrapping.
pub struct Cond {
inner: *mut ffi::FiberCond,
}
/// - call [Cond::new()](#method.new) to create a named condition variable, which will be called `cond` for examples in this section.
/// - call [cond.wait()](#method.wait) to make a fiber wait for a signal via a condition variable.
/// - call [cond.signal()](#method.signal) to send a signal to wake up a single fiber that has executed [cond.wait()](#method.wait).
/// - call [cond.broadcast()](#method.broadcast) to send a signal to all fibers that have executed [cond.wait()](#method.wait).
impl Cond {
/// Instantiate a new fiber cond object.
pub fn new() -> Self {
Cond {
inner: unsafe { ffi::fiber_cond_new() },
}
}
/// Wake one fiber waiting for the cond.
/// Does nothing if no one is waiting. Does not yield.
pub fn signal(&self) {
unsafe { ffi::fiber_cond_signal(self.inner) }
}
/// Wake up all fibers waiting for the cond.
/// Does not yield.
pub fn broadcast(&self) {
unsafe | the execution of the current fiber (i.e. yield) until [signal()](#method.signal) is called.
///
/// Like pthread_cond, FiberCond can issue spurious wake ups caused by explicit
/// [Fiber::wakeup()](struct.Fiber.html#method.wakeup) or [Fiber::cancel()](struct.Fiber.html#method.cancel)
/// calls. It is highly recommended to wrap calls to this function into a loop
/// and check an actual predicate and `fiber_testcancel()` on every iteration.
///
/// - `timeout` - timeout in seconds
///
/// Returns:
/// - `true` on [signal()](#method.signal) call or a spurious wake up.
/// - `false` on timeout, diag is set to `TimedOut`
pub fn wait_timeout(&self, timeout: Duration) -> bool {
!(unsafe { ffi::fiber_cond_wait_timeout(self.inner, timeout.as_secs_f64()) } < 0)
}
/// Shortcut for [wait_timeout()](#method.wait_timeout).
pub fn wait(&self) -> bool {
!(unsafe { ffi::fiber_cond_wait(self.inner) } < 0)
}
}
impl Drop for Cond {
fn drop(&mut self) {
unsafe { ffi::fiber_cond_delete(self.inner) }
}
}
/// A lock for cooperative multitasking environment
pub struct Latch {
inner: *mut ffi::Latch,
}
impl Latch {
    /// Allocate and initialize the new latch.
    pub fn new() -> Self {
        let inner = unsafe { ffi::box_latch_new() };
        Latch { inner }
    }

    /// Lock a latch. Waits indefinitely until the current fiber can gain access to the latch.
    pub fn lock(&self) -> LatchGuard {
        unsafe { ffi::box_latch_lock(self.inner) };
        LatchGuard {
            latch_inner: self.inner,
        }
    }

    /// Try to lock a latch. Return immediately if the latch is locked.
    ///
    /// Returns:
    /// - `Some` - success
    /// - `None` - the latch is locked.
    pub fn try_lock(&self) -> Option<LatchGuard> {
        match unsafe { ffi::box_latch_trylock(self.inner) } {
            0 => Some(LatchGuard {
                latch_inner: self.inner,
            }),
            _ => None,
        }
    }
}
impl Drop for Latch {
fn drop(&mut self) {
unsafe { ffi::box_latch_delete(self.inner) }
}
}
/// An RAII implementation of a "scoped lock" of a latch. When this structure is dropped (falls out of scope),
/// the lock will be unlocked.
pub struct LatchGuard {
latch_inner: *mut ffi::Latch,
}
impl Drop for LatchGuard {
fn drop(&mut self) {
unsafe { ffi::box_latch_unlock(self.latch_inner) }
}
}
pub(crate) unsafe fn unpack_callback<F, T>(callback: &mut F) -> (*mut c_void, ffi::FiberFunc)
where
F: FnMut(Box<T>) -> i32,
{
unsafe extern "C" fn trampoline<F, T>(mut args: VaList) -> i32
where
F: FnMut(Box<T>) -> i32,
{
let closure: &mut F = &mut *(args.get::<*const c_void>() as *mut F);
let arg = Box::from_raw(args.get::<*const c_void>() as *mut T);
(*closure)(arg)
}
(callback as *mut F as *mut c_void, Some(trampoline::<F, T>))
}
| { ffi::fiber_cond_broadcast(self.inner) }
}
/// Suspend | identifier_body |
ballot.rs | use indexmap::IndexMap;
use prost::Message;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Ballot {
    /// Identifier of this ballot.
    pub id: String,
    /// Indexes of the contests this ballot participates in — presumably
    /// matching `Contest::index` values; confirm against the caller.
    pub contests: Vec<u32>, // List of contest indexes
    /// Application specific properties.
    ///
    /// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
    #[serde(default)]
    #[serde(skip_serializing_if = "IndexMap::is_empty")]
    pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Contest {
    /// Identifier of this contest.
    pub id: String,
    /// Numeric index by which ballots reference this contest
    /// (see `Ballot::contests`).
    pub index: u32,
    /// Tally method used for this contest (see `ContestType`).
    pub contest_type: ContestType,
    /// Number of winners to be elected in this contest.
    pub num_winners: u32,
    /// Whether write-in candidates are permitted — NOTE(review): inferred
    /// from the field name; confirm.
    pub write_in: bool,
    /// Candidates standing in this contest.
    pub candidates: Vec<Candidate>,
    /// Application specific properties.
    ///
    /// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
    #[serde(default)]
    #[serde(skip_serializing_if = "IndexMap::is_empty")]
    pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Candidate {
    /// Identifier of this candidate.
    pub id: String,
    /// Application specific properties.
    ///
    /// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
    #[serde(default)]
    #[serde(skip_serializing_if = "IndexMap::is_empty")]
    pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ContestType {
/// Plurality voting is an electoral system in which each voter is allowed to vote for only one candidate and the candidate
/// who polls the most among their counterparts (a plurality) is elected. It may be called first-past-the-post (FPTP),
/// single-choice voting, simple plurality, or relative/simple majority.
///
/// For Plurality tally, `Selection.score` has no meaning.
Plurality,
/// Score voting or “range voting” is an electoral system in which voters give each candidate a score, the scores are summed,
/// and the candidate with the highest total is elected. It has been described by various other names including “evaluative voting”,
/// “utilitarian voting”, and “the point system”.
///
/// For Score tally, `Selection.score` represents the number of points assigned to each candidate. Zero is the worst score that can be asssigned to a candidate.
Score,
/// Approval voting is a single-winner electoral system where each voter may select (“approve”) any number of candidates.
/// The winner is the most-approved candidate.
///
/// For Approval tally, `Selection.score` has no meaning.
Approval,
/// The Condorcet method is a ranked-choice voting system that elects the candidate that would win a majority of the vote in all of the head-to-head elections against each of the other candidates.
/// The Condorcet method isn’t guarunteed to produce a single-winner due to the non-transitive nature of group choice.
///
/// For Condorcet tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Condorcet,
/// The standard Borda count where each candidate is assigned a number of points equal to the number of candidates ranked lower than them.
/// It is known as the "Starting at 0" Borda count since the least-significantly ranked candidate is given zero points.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position - 1```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candiate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 3 |
/// | 1 | Bob | 2 |
/// | 2 | Carlos | 1 |
/// | 3 | Dave | 0 |
///
/// For Borda tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Borda,
/// The classic Borda count as defined in Jean-Charles de Borda's [original proposal](http://gerardgreco.free.fr/IMG/pdf/MA_c_moire-Borda-1781.pdf).
/// It is known as the "Starting at 1" Borda count since the least-significantly ranked candidate is given one point.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candiate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 4 |
/// | 1 | Bob | 3 |
/// | 2 | Carlos | 2 |
/// | 3 | Dave | 1 |
///
/// For BordaClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaClassic,
/// In the Dowdall system, the highest-ranked candidate obtains 1 point, while the 2nd-ranked candidate receives ½ a point, the 3rd-ranked candidate receives ⅓ of a point, etc.
/// An important difference of this method from the others is that the number of points assigned to each preference does not depend on the number of candidates.
/// Each candidate is given points according to:
///
/// ```1 / (candidate-position + 1)```
///
/// If Dowdall is selected, tallystick will panic if an integer count type is used in the tally. This variant should only be used with a float or rational tally.
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candiate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 1 |
/// | 1 | Bob | ½ |
/// | 2 | Carlos | ⅓ |
/// | 3 | Dave | ¼ |
///
/// For BordaDowdall tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaDowdall,
/// In a modified Borda count, the number of points given for a voter's first and subsequent preferences is determined by the total number of candidates they have actually ranked, rather than the total number listed.
/// This is to say, typically, on a ballot of `n` candidates, if a voter casts only `m` preferences (where `n ≥ m ≥ 1`), a first preference gets `m` points, a second preference `m – 1` points, and so on.
/// Modified Borda counts are used to counteract the problem of [bullet voting](https://en.wikipedia.org/wiki/Bullet_voting).
/// Each candidate is given points according to:
///
/// ```number-marked - candidate-position```
///
/// For BordaModifiedClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaModifiedClassic,
/// The Schulze method is an voting system that selects a single winner using votes that express preferences.
/// In SchulzeWinning Strength of a link is measured by its support. You should use this Schulze variant if you are unsure.
///
/// For SchulzeWinning tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeWinning,
/// The Schulze method is an voting system that selects a single winner using votes that express preferences.
/// In SchulzeRatio, the strength of a link is measured by the difference between its support and opposition.
///
/// For SchulzeRatio tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeRatio,
/// The Schulze method is an voting system that selects a single winner using votes that express preferences.
/// In SchulzeMargin, the strength of a link is measured by the ratio of its support and opposition.
///
/// For SchulzeMargin tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeMargin,
}
| #[serde(default)]
pub write_in: bool,
/// Score has different meanings depending on the tally type:
/// STV, Condorcet, Borda and Schulze: `score` means candidate rank, where a zero is the best rank that can be assigned to a candidate.
/// Score: `score` is the points assinged to this candidate. Zero is the worst score that can be asssigned to a candidate.
/// Plurality, Approval, and InstantRunoff: `score` is meaningless and has no effect.
#[prost(uint32)]
#[serde(default)]
pub score: u32,
/// Known candidate-id or free-form text, depending on the value of the `write_in` field.
#[prost(string)]
pub selection: String,
}
impl Into<(String, u64)> for Selection {
fn into(self) -> (String, u64) {
(self.selection, self.score as u64)
}
}
impl Into<(String, u32)> for Selection {
fn into(self) -> (String, u32) {
(self.selection, self.score)
}
} | #[derive(Serialize, Deserialize, Clone, Message, PartialEq, Eq)]
pub struct Selection {
/// true if the `selection` field is a free-form write-in, false if the `selection` field corresponds to a known candidate-id
#[prost(bool)] | random_line_split |
ballot.rs | use indexmap::IndexMap;
use prost::Message;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct | {
pub id: String,
pub contests: Vec<u32>, // List of contest indexes
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Contest {
pub id: String,
pub index: u32,
pub contest_type: ContestType,
pub num_winners: u32,
pub write_in: bool,
pub candidates: Vec<Candidate>,
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Candidate {
pub id: String,
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ContestType {
/// Plurality voting is an electoral system in which each voter is allowed to vote for only one candidate and the candidate
/// who polls the most among their counterparts (a plurality) is elected. It may be called first-past-the-post (FPTP),
/// single-choice voting, simple plurality, or relative/simple majority.
///
/// For Plurality tally, `Selection.score` has no meaning.
Plurality,
/// Score voting or “range voting” is an electoral system in which voters give each candidate a score, the scores are summed,
/// and the candidate with the highest total is elected. It has been described by various other names including “evaluative voting”,
/// “utilitarian voting”, and “the point system”.
///
/// For Score tally, `Selection.score` represents the number of points assigned to each candidate. Zero is the worst score that can be asssigned to a candidate.
Score,
/// Approval voting is a single-winner electoral system where each voter may select (“approve”) any number of candidates.
/// The winner is the most-approved candidate.
///
/// For Approval tally, `Selection.score` has no meaning.
Approval,
/// The Condorcet method is a ranked-choice voting system that elects the candidate that would win a majority of the vote in all of the head-to-head elections against each of the other candidates.
/// The Condorcet method isn’t guarunteed to produce a single-winner due to the non-transitive nature of group choice.
///
/// For Condorcet tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Condorcet,
/// The standard Borda count where each candidate is assigned a number of points equal to the number of candidates ranked lower than them.
/// It is known as the "Starting at 0" Borda count since the least-significantly ranked candidate is given zero points.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position - 1```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candiate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 3 |
/// | 1 | Bob | 2 |
/// | 2 | Carlos | 1 |
/// | 3 | Dave | 0 |
///
/// For Borda tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Borda,
/// The classic Borda count as defined in Jean-Charles de Borda's [original proposal](http://gerardgreco.free.fr/IMG/pdf/MA_c_moire-Borda-1781.pdf).
/// It is known as the "Starting at 1" Borda count since the least-significantly ranked candidate is given one point.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candiate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 4 |
/// | 1 | Bob | 3 |
/// | 2 | Carlos | 2 |
/// | 3 | Dave | 1 |
///
/// For BordaClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaClassic,
/// In the Dowdall system, the highest-ranked candidate obtains 1 point, while the 2nd-ranked candidate receives ½ a point, the 3rd-ranked candidate receives ⅓ of a point, etc.
/// An important difference of this method from the others is that the number of points assigned to each preference does not depend on the number of candidates.
/// Each candidate is given points according to:
///
/// ```1 / (candidate-position + 1)```
///
/// If Dowdall is selected, tallystick will panic if an integer count type is used in the tally. This variant should only be used with a float or rational tally.
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candiate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 1 |
/// | 1 | Bob | ½ |
/// | 2 | Carlos | ⅓ |
/// | 3 | Dave | ¼ |
///
/// For BordaDowdall tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaDowdall,
/// In a modified Borda count, the number of points given for a voter's first and subsequent preferences is determined by the total number of candidates they have actually ranked, rather than the total number listed.
/// This is to say, typically, on a ballot of `n` candidates, if a voter casts only `m` preferences (where `n ≥ m ≥ 1`), a first preference gets `m` points, a second preference `m – 1` points, and so on.
/// Modified Borda counts are used to counteract the problem of [bullet voting](https://en.wikipedia.org/wiki/Bullet_voting).
/// Each candidate is given points according to:
///
/// ```number-marked - candidate-position```
///
/// For BordaModifiedClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaModifiedClassic,
/// The Schulze method is an voting system that selects a single winner using votes that express preferences.
/// In SchulzeWinning Strength of a link is measured by its support. You should use this Schulze variant if you are unsure.
///
/// For SchulzeWinning tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeWinning,
/// The Schulze method is an voting system that selects a single winner using votes that express preferences.
/// In SchulzeRatio, the strength of a link is measured by the difference between its support and opposition.
///
/// For SchulzeRatio tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeRatio,
/// The Schulze method is an voting system that selects a single winner using votes that express preferences.
/// In SchulzeMargin, the strength of a link is measured by the ratio of its support and opposition.
///
/// For SchulzeMargin tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeMargin,
}
#[derive(Serialize, Deserialize, Clone, Message, PartialEq, Eq)]
pub struct Selection {
/// true if the `selection` field is a free-form write-in, false if the `selection` field corresponds to a known candidate-id
#[prost(bool)]
#[serde(default)]
pub write_in: bool,
/// Score has different meanings depending on the tally type:
/// STV, Condorcet, Borda and Schulze: `score` means candidate rank, where a zero is the best rank that can be assigned to a candidate.
/// Score: `score` is the points assinged to this candidate. Zero is the worst score that can be asssigned to a candidate.
/// Plurality, Approval, and InstantRunoff: `score` is meaningless and has no effect.
#[prost(uint32)]
#[serde(default)]
pub score: u32,
/// Known candidate-id or free-form text, depending on the value of the `write_in` field.
#[prost(string)]
pub selection: String,
}
impl Into<(String, u64)> for Selection {
fn into(self) -> (String, u64) {
(self.selection, self.score as u64)
}
}
impl Into<(String, u32)> for Selection {
fn into(self) -> (String, u32) {
(self.selection, self.score)
}
}
| Ballot | identifier_name |
universe.rs | /*!
# RS Mate Poe: Universe
*/
use crate::{
Frame,
Position,
State,
};
#[cfg(feature = "director")] use crate::{Animation, dom};
use std::sync::atomic::{
AtomicU8,
AtomicU32,
AtomicU64,
Ordering::SeqCst,
};
#[cfg(feature = "director")] use std::sync::atomic::AtomicU16;
#[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
#[allow(unsafe_code)]
#[wasm_bindgen(js_namespace = Math, js_name = "random")]
/// # Math.random.
///
/// This is the only `js_sys` thing we'd be using, so may as well handle
/// the import manually.
fn js_random() -> f64;
}
/// # Flags.
///
/// This holds a few basic bitflag runtime settings. (See the constants defined
/// on the [`Universe`] below.)
static FLAGS: AtomicU8 = AtomicU8::new(Universe::AUDIO);
#[cfg(feature = "director")]
/// # Next Animation.
///
/// This holds the `u8` equivalent of an animation requested from Browserland,
/// if any. Because those begin at `1`, zero is equivalent to none.
static NEXT_ANIMATION: AtomicU8 = AtomicU8::new(0);
/// # Mouse Coordinates.
///
/// This holds the (x, y) mouse coordinates captured while dragging.
///
/// The logical values for each are `i32`, but because they're always accessed
/// or updated as a pair, they're stored within a single 64-bit atomic.
static POS: AtomicU64 = AtomicU64::new(0);
/// # Xoshi Seed #1.
static SEED1: AtomicU64 = AtomicU64::new(0x8596_cc44_bef0_1aa0);
/// # Xoshi Seed #2.
static SEED2: AtomicU64 = AtomicU64::new(0x98d4_0948_da60_19ae);
/// # Xoshi Seed #3.
static SEED3: AtomicU64 = AtomicU64::new(0x49f1_3013_c503_a6aa);
/// # Xoshi Seed #4.
static SEED4: AtomicU64 = AtomicU64::new(0xc4d7_82ff_3c9f_7bef);
#[cfg(feature = "director")]
/// # Speed.
///
/// This holds the playback speed as an integer percentage in the range of
/// `0..=1000`, where `100` is normal.
static SPEED: AtomicU16 = AtomicU16::new(100);
/// # Screen Width and Height.
///
/// The dimensions are both `u16`, stored together because they're only ever
/// accessed/updated as a pair.
static SIZE: AtomicU32 = AtomicU32::new(0);
#[derive(Debug, Clone, Copy)]
/// # Universe.
///
/// This struct holds global settings that can be statically accessed from
/// anywhere within the application, mimicking Javascript's slutty inheritance
/// model, but safely because all of the data is atomic.
///
/// The [`Poe`](crate::Poe) struct exposes some of these settings — start/stop,
/// audio, speed — to the end user, but the rest are fully closed off.
pub(crate) struct Universe;
impl Universe {
const ACTIVE: u8 = 0b0000_0001; // Poe is active.
const AUDIO: u8 = 0b0000_0010; // Audio is enabled.
const DRAGGING: u8 = 0b0000_0100; // Poe is currently being dragged.
const ASSIGN_CHILD: u8 = 0b0000_1000; // The primary mate needs a child animation.
const NO_CHILD: u8 = 0b0001_0000; // Children must be stopped!
const NO_FOCUS: u8 = 0b0010_0000; // Disable primary mate focus support.
const STATE: u8 = 0b0100_0000; // State is active.
#[cfg(feature = "firefox")]
const FIX_BINDINGS: u8 = 0b1000_0000; // Body element bindings were lost.
}
macro_rules! get {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Is ", $title, "?")]
#[inline]
pub(crate) fn $fn() -> bool {
Self::$flag == FLAGS.load(SeqCst) & Self::$flag
}
);
}
impl Universe {
get!("Active", ACTIVE, active);
get!("Audio Enabled", AUDIO, audio);
get!("Dragging", DRAGGING, dragging);
get!("No Focus Allowed", NO_FOCUS, no_focus);
/// # Assign Child Animation?
///
/// Returns `true` if the previous mate requested a new child since the
/// last time this method was called.
pub(crate) fn assign_child() -> bool {
| [cfg(feature = "firefox")]
/// # Fix Element Bindings?
///
/// Returns `true` if one or both elements seem to have disappeared from
/// the document body since the last time this method was called.
pub(crate) fn fix_bindings() -> bool {
let old = FLAGS.fetch_and(! Self::FIX_BINDINGS, SeqCst);
let expected = Self::FIX_BINDINGS | Self::ACTIVE;
expected == old & expected
}
/// # Stop Child Animations?
///
/// Returns `true` if the previous mate requested the end to childhood.
pub(crate) fn no_child() -> bool {
let old = FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
Self::NO_CHILD == old & Self::NO_CHILD
}
#[cfg(feature = "director")]
#[inline]
/// # Are We Paused?
pub(crate) fn paused() -> bool { SPEED.load(SeqCst) == 0 }
#[cfg(not(feature = "director"))]
/// # We Aren't Paused.
pub(crate) const fn paused() -> bool { false }
/// # Position.
///
/// The current — or last recorded — X/Y position of the mouse on the
/// screen.
///
/// This information is only captured when the primary Poe mate is being
/// dragged, so will otherwise grow stale.
pub(crate) fn pos() -> Position {
let pos = POS.load(SeqCst).to_le_bytes();
let x = i32::from_le_bytes([pos[0], pos[1], pos[2], pos[3]]);
let y = i32::from_le_bytes([pos[4], pos[5], pos[6], pos[7]]);
Position::new(x, y)
}
/// # Width/Height.
///
/// Returns the current — or last recorded — dimensions of the screen.
///
/// These are captured when the universe is first initialized and refreshed
/// whenever the window is resized, but will grow stale when Poe has been
/// de-activated.
pub(crate) fn size() -> (u16, u16) {
let size = SIZE.load(SeqCst).to_le_bytes();
let width = u16::from_le_bytes([size[0], size[1]]);
let height = u16::from_le_bytes([size[2], size[3]]);
match (width, height) {
(0, 0) => (1, 1),
(0, h) => (1, h),
(w, 0) => (w, 1),
(w, h) => (w, h),
}
}
}
impl Universe {
#[inline]
/// # Random Value.
///
/// Return a random `u64` (xoshiro256).
pub(crate) fn rand() -> u64 {
let mut seeds = get_seeds();
let out = seeds[1].overflowing_mul(5).0.rotate_left(7).overflowing_mul(9).0;
update_seeds(&mut seeds);
set_seeds(&seeds);
out
}
#[allow(clippy::cast_possible_truncation)]
/// # Random (Capped) U16.
///
/// Return a random number between `0..max`, mitigating bias the same way
/// as `fastrand` (i.e. <https://lemire.me/blog/2016/06/30/fast-random-shuffling/>).
pub(crate) fn rand_mod(n: u16) -> u16 {
let mut r = Self::rand() as u16;
let mut hi = mul_high_u16(r, n);
let mut lo = r.wrapping_mul(n);
if lo < n {
let t = n.wrapping_neg() % n;
while lo < t {
r = Self::rand() as u16;
hi = mul_high_u16(r, n);
lo = r.wrapping_mul(n);
}
}
hi
}
}
macro_rules! set {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Set ", $title, ".")]
pub(crate) fn $fn(v: bool) {
if v { FLAGS.fetch_or(Self::$flag, SeqCst); }
else { FLAGS.fetch_and(! Self::$flag, SeqCst); }
}
);
}
impl Universe {
set!("Allow Audio", AUDIO, set_audio);
set!("Dragging", DRAGGING, set_dragging);
set!("State", STATE, set_state);
/// # Set Active.
///
/// Enable or disable the universe (and Poe, etc.), returning `true` if
/// different than the previous state.
pub(crate) fn set_active(v: bool) -> bool {
if v == (0!= FLAGS.load(SeqCst) & (Self::ACTIVE | Self::STATE)) { false }
else {
if v {
// Set active flag.
FLAGS.fetch_or(Self::ACTIVE, SeqCst);
// Seed future randomness if we can.
#[cfg(target_arch = "wasm32")] reseed();
// Set up the DOM elements and event bindings, and begin the
// animation frame loop.
State::init();
}
else {
// Clear everything but the audio, focus, and state properties.
// (State will clear itself in a moment, hopefully.)
FLAGS.fetch_and(Self::AUDIO | Self::NO_FOCUS | Self::STATE, SeqCst);
}
true
}
}
/// # Set Assign Child Flag.
///
/// This will also remove the incompatible no-child flag.
pub(crate) fn set_assign_child() {
if Self::NO_CHILD == FLAGS.fetch_or(Self::ASSIGN_CHILD, SeqCst) & Self::NO_CHILD {
FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
}
}
#[cfg(feature = "firefox")]
/// # Require Element Re-Binding.
pub(crate) fn set_fix_bindings() {
if Self::active() { FLAGS.fetch_or(Self::FIX_BINDINGS, SeqCst); }
}
/// # Set No Child Flag.
///
/// This will also remove the incompatible assign-child flag.
pub(crate) fn set_no_child() {
if Self::ASSIGN_CHILD == FLAGS.fetch_or(Self::NO_CHILD, SeqCst) & Self::ASSIGN_CHILD {
FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
}
}
/// # Set No Focus.
///
/// If true, this will also disable dragging, since there wouldn't be
/// any way to undrag.
pub(crate) fn set_no_focus(v: bool) {
if v {
if Self::DRAGGING == FLAGS.fetch_or(Self::NO_FOCUS, SeqCst) & Self::DRAGGING {
FLAGS.fetch_and(! Self::DRAGGING, SeqCst);
}
}
else { FLAGS.fetch_and(! Self::NO_FOCUS, SeqCst); }
}
/// # Set Position.
///
/// Update the cached X/Y mouse coordinates, only used when dragging a
/// Poe around the screen.
pub(crate) fn set_pos(x: i32, y: i32) {
let half_tile = Frame::SIZE_I.saturating_div(2);
let x = x.saturating_sub(half_tile).to_le_bytes();
let y = y.saturating_sub(half_tile).to_le_bytes();
let pos = u64::from_le_bytes([
x[0], x[1], x[2], x[3],
y[0], y[1], y[2], y[3],
]);
POS.store(pos, SeqCst);
}
/// # Set Width/Height.
///
/// This updates the cached window dimensions.
pub(crate) fn set_size(width: u16, height: u16) {
let width = width.to_le_bytes();
let height = height.to_le_bytes();
SIZE.store(u32::from_le_bytes([width[0], width[1], height[0], height[1]]), SeqCst);
}
}
#[cfg(feature = "director")]
impl Universe {
/// # Speed.
///
/// Returns the current playback speed if other than "normal" or paused.
pub(crate) fn speed() -> Option<f32> {
let speed = SPEED.load(SeqCst);
if speed == 0 || speed == 100 { None }
else { Some(f32::from(speed) / 100.0) }
}
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
/// # Set Speed.
///
/// Change the animation playback speed.
pub(crate) fn set_speed(speed: f32) {
// Clamp the range to something sane.
let speed =
if speed.is_nan() { 100.0 }
else { (speed * 100.0).clamp(0.0, 1000.0) };
// Store as an integer.
SPEED.store(speed as u16, SeqCst);
#[cfg(feature = "director")] dom::console_debug(&format!(
"Playback Speed: {speed:.2}%"
));
}
/// # Browserland Next Animation.
///
/// This returns (and clears) the animation set by `Poe.play`, if any.
pub(crate) fn next_animation() -> Option<Animation> {
Animation::from_u8(NEXT_ANIMATION.swap(0, SeqCst)).filter(|a| a.playable())
}
/// # Set Browserland Next Animation.
///
/// `Poe.play` uses this to manually override the primary mate's current
/// animation.
pub(crate) fn set_next_animation(next: u8) {
NEXT_ANIMATION.store(next, SeqCst);
}
}
#[inline]
/// # Get Seeds.
fn get_seeds() -> [u64; 4] {
[
SEED1.load(SeqCst),
SEED2.load(SeqCst),
SEED3.load(SeqCst),
SEED4.load(SeqCst),
]
}
#[inline]
/// # High 16 Product.
const fn mul_high_u16(a: u16, b: u16) -> u16 {
(((a as u32) * (b as u32)) >> 16) as u16
}
#[cfg(target_arch = "wasm32")]
/// # Reseed Randomness.
fn reseed() {
// Splitmix Math.random to give us a reasonable starting point for the
// subsequent Xoshi randomness.
let mut seed: u64 = js_random().to_bits();
let mut seeds = [0_u64; 4];
for i in &mut seeds { *i = splitmix(&mut seed); }
set_seeds(&seeds);
// Print a debug message if we care about that sort of thing.
#[cfg(feature = "director")]
dom::console_debug(&format!(
"PNRG1: {:016x}\nPNRG2: {:016x}\nPNRG3: {:016x}\nPNRG4: {:016x}",
seeds[0],
seeds[1],
seeds[2],
seeds[3],
));
}
/// # Set Seeds.
fn set_seeds(seeds: &[u64; 4]) {
// We are unlikely to wind up with all zeroes, but just in case…
if seeds[0] == 0 && seeds[1] == 0 && seeds[2] == 0 && seeds[3] == 0 {
SEED1.store(0x8596_cc44_bef0_1aa0, SeqCst);
SEED2.store(0x98d4_0948_da60_19ae, SeqCst);
SEED3.store(0x49f1_3013_c503_a6aa, SeqCst);
SEED4.store(0xc4d7_82ff_3c9f_7bef, SeqCst);
}
else {
SEED1.store(seeds[0], SeqCst);
SEED2.store(seeds[1], SeqCst);
SEED3.store(seeds[2], SeqCst);
SEED4.store(seeds[3], SeqCst);
}
}
/// # Update Seeds.
fn update_seeds(seeds: &mut[u64; 4]) {
let t = seeds[1] << 17;
seeds[2] ^= seeds[0];
seeds[3] ^= seeds[1];
seeds[1] ^= seeds[2];
seeds[0] ^= seeds[3];
seeds[2] ^= t;
seeds[3] = seeds[3].rotate_left(45);
}
#[cfg(target_arch = "wasm32")]
/// # Split/Mix.
///
/// This is used to generate our Xoshi256 seeds from a single source `u64`.
fn splitmix(seed: &mut u64) -> u64 {
// Update the source seed.
*seed = seed.overflowing_add(0x9e37_79b9_7f4a_7c15).0;
// Calculate and return a random value.
let mut z: u64 = (*seed ^ (*seed >> 30)).overflowing_mul(0xbf58_476d_1ce4_e5b9).0;
z = (z ^ (z >> 27)).overflowing_mul(0x94d0_49bb_1331_11eb).0;
z ^ (z >> 31)
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
#[test]
fn t_rand() {
assert_eq!(Universe::rand_mod(0), 0, "Random zero broke!");
let set = (0..5000_u16).into_iter()
.map(|_| Universe::rand_mod(100))
.collect::<HashSet<u16>>();
assert!(set.iter().all(|n| *n < 100), "Value(s) out of range.");
assert_eq!(
set.len(),
100,
"Failed to collect 100/100 possibilities in 5000 tries."
);
}
}
| let old = FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
Self::ASSIGN_CHILD == old & Self::ASSIGN_CHILD
}
# | identifier_body |
universe.rs | /*!
# RS Mate Poe: Universe
*/
use crate::{
Frame,
Position,
State,
};
#[cfg(feature = "director")] use crate::{Animation, dom};
use std::sync::atomic::{
AtomicU8,
AtomicU32,
AtomicU64,
Ordering::SeqCst,
};
#[cfg(feature = "director")] use std::sync::atomic::AtomicU16;
#[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
#[allow(unsafe_code)]
#[wasm_bindgen(js_namespace = Math, js_name = "random")]
/// # Math.random.
///
/// This is the only `js_sys` thing we'd be using, so may as well handle
/// the import manually.
fn js_random() -> f64;
}
/// # Flags.
///
/// This holds a few basic bitflag runtime settings. (See the constants defined
/// on the [`Universe`] below.)
static FLAGS: AtomicU8 = AtomicU8::new(Universe::AUDIO);
#[cfg(feature = "director")]
/// # Next Animation.
///
/// This holds the `u8` equivalent of an animation requested from Browserland,
/// if any. Because those begin at `1`, zero is equivalent to none.
static NEXT_ANIMATION: AtomicU8 = AtomicU8::new(0);
/// # Mouse Coordinates.
///
/// This holds the (x, y) mouse coordinates captured while dragging.
///
/// The logical values for each are `i32`, but because they're always accessed
/// or updated as a pair, they're stored within a single 64-bit atomic.
static POS: AtomicU64 = AtomicU64::new(0);
/// # Xoshi Seed #1.
static SEED1: AtomicU64 = AtomicU64::new(0x8596_cc44_bef0_1aa0);
/// # Xoshi Seed #2.
static SEED2: AtomicU64 = AtomicU64::new(0x98d4_0948_da60_19ae);
/// # Xoshi Seed #3.
static SEED3: AtomicU64 = AtomicU64::new(0x49f1_3013_c503_a6aa);
/// # Xoshi Seed #4.
static SEED4: AtomicU64 = AtomicU64::new(0xc4d7_82ff_3c9f_7bef);
#[cfg(feature = "director")]
/// # Speed.
///
/// This holds the playback speed as an integer percentage in the range of
/// `0..=1000`, where `100` is normal.
static SPEED: AtomicU16 = AtomicU16::new(100);
/// # Screen Width and Height.
///
/// The dimensions are both `u16`, stored together because they're only ever
/// accessed/updated as a pair.
static SIZE: AtomicU32 = AtomicU32::new(0);
#[derive(Debug, Clone, Copy)]
/// # Universe.
///
/// This struct holds global settings that can be statically accessed from
/// anywhere within the application, mimicking Javascript's slutty inheritance
/// model, but safely because all of the data is atomic.
///
/// The [`Poe`](crate::Poe) struct exposes some of these settings — start/stop,
/// audio, speed — to the end user, but the rest are fully closed off.
pub(crate) struct Universe;
impl Universe {
const ACTIVE: u8 = 0b0000_0001; // Poe is active.
const AUDIO: u8 = 0b0000_0010; // Audio is enabled.
const DRAGGING: u8 = 0b0000_0100; // Poe is currently being dragged.
const ASSIGN_CHILD: u8 = 0b0000_1000; // The primary mate needs a child animation.
const NO_CHILD: u8 = 0b0001_0000; // Children must be stopped!
const NO_FOCUS: u8 = 0b0010_0000; // Disable primary mate focus support.
const STATE: u8 = 0b0100_0000; // State is active.
#[cfg(feature = "firefox")]
const FIX_BINDINGS: u8 = 0b1000_0000; // Body element bindings were lost.
}
macro_rules! get {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Is ", $title, "?")]
#[inline]
pub(crate) fn $fn() -> bool {
Self::$flag == FLAGS.load(SeqCst) & Self::$flag
}
);
}
impl Universe {
get!("Active", ACTIVE, active);
get!("Audio Enabled", AUDIO, audio);
get!("Dragging", DRAGGING, dragging);
get!("No Focus Allowed", NO_FOCUS, no_focus);
/// # Assign Child Animation?
///
/// Returns `true` if the previous mate requested a new child since the
/// last time this method was called.
pub(crate) fn assi | > bool {
let old = FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
Self::ASSIGN_CHILD == old & Self::ASSIGN_CHILD
}
#[cfg(feature = "firefox")]
/// # Fix Element Bindings?
///
/// Returns `true` if one or both elements seem to have disappeared from
/// the document body since the last time this method was called.
pub(crate) fn fix_bindings() -> bool {
let old = FLAGS.fetch_and(! Self::FIX_BINDINGS, SeqCst);
let expected = Self::FIX_BINDINGS | Self::ACTIVE;
expected == old & expected
}
/// # Stop Child Animations?
///
/// Returns `true` if the previous mate requested the end to childhood.
pub(crate) fn no_child() -> bool {
let old = FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
Self::NO_CHILD == old & Self::NO_CHILD
}
#[cfg(feature = "director")]
#[inline]
/// # Are We Paused?
pub(crate) fn paused() -> bool { SPEED.load(SeqCst) == 0 }
#[cfg(not(feature = "director"))]
/// # We Aren't Paused.
pub(crate) const fn paused() -> bool { false }
/// # Position.
///
/// The current — or last recorded — X/Y position of the mouse on the
/// screen.
///
/// This information is only captured when the primary Poe mate is being
/// dragged, so will otherwise grow stale.
pub(crate) fn pos() -> Position {
let pos = POS.load(SeqCst).to_le_bytes();
let x = i32::from_le_bytes([pos[0], pos[1], pos[2], pos[3]]);
let y = i32::from_le_bytes([pos[4], pos[5], pos[6], pos[7]]);
Position::new(x, y)
}
/// # Width/Height.
///
/// Returns the current — or last recorded — dimensions of the screen.
///
/// These are captured when the universe is first initialized and refreshed
/// whenever the window is resized, but will grow stale when Poe has been
/// de-activated.
pub(crate) fn size() -> (u16, u16) {
let size = SIZE.load(SeqCst).to_le_bytes();
let width = u16::from_le_bytes([size[0], size[1]]);
let height = u16::from_le_bytes([size[2], size[3]]);
match (width, height) {
(0, 0) => (1, 1),
(0, h) => (1, h),
(w, 0) => (w, 1),
(w, h) => (w, h),
}
}
}
impl Universe {
	#[inline]
	/// # Random Value.
	///
	/// Return a random `u64` (xoshiro256).
	///
	/// The four state words live in global atomics; each call snapshots
	/// them, derives the output, advances the state one step, and stores it
	/// back.
	pub(crate) fn rand() -> u64 {
		let mut seeds = get_seeds();
		// xoshiro256** output scrambler: rotl(s1 * 5, 7) * 9.
		let out = seeds[1].overflowing_mul(5).0.rotate_left(7).overflowing_mul(9).0;
		update_seeds(&mut seeds);
		set_seeds(&seeds);
		out
	}
	#[allow(clippy::cast_possible_truncation)]
	/// # Random (Capped) U16.
	///
	/// Return a random number between `0..max`, mitigating bias the same way
	/// as `fastrand` (i.e. <https://lemire.me/blog/2016/06/30/fast-random-shuffling/>).
	///
	/// Note: `n == 0` falls straight through to `hi == 0` (the `lo < n`
	/// guard can never be true), so zero in yields zero out.
	pub(crate) fn rand_mod(n: u16) -> u16 {
		let mut r = Self::rand() as u16;
		// `hi` is the candidate result (multiply-shift reduction); `lo` is
		// the discarded low half, used to detect the biased region.
		let mut hi = mul_high_u16(r, n);
		let mut lo = r.wrapping_mul(n);
		if lo < n {
			// Reject and redraw while the low half lands in the biased
			// region (`t` = 2^16 mod n).
			let t = n.wrapping_neg() % n;
			while lo < t {
				r = Self::rand() as u16;
				hi = mul_high_u16(r, n);
				lo = r.wrapping_mul(n);
			}
		}
		hi
	}
}
/// # Boolean Flag Setter Generator.
///
/// Expands to a `Universe` method that switches the given `FLAGS` bit on
/// (`fetch_or`) or off (`fetch_and` with the complement).
macro_rules! set {
	($title:literal, $flag:ident, $fn:ident) => (
		#[doc = concat!("# Set ", $title, ".")]
		pub(crate) fn $fn(v: bool) {
			if v { FLAGS.fetch_or(Self::$flag, SeqCst); }
			else { FLAGS.fetch_and(! Self::$flag, SeqCst); }
		}
	);
}
impl Universe {
	// Simple one-bit toggles, generated by the `set!` macro above.
	set!("Allow Audio", AUDIO, set_audio);
	set!("Dragging", DRAGGING, set_dragging);
	set!("State", STATE, set_state);
	/// # Set Active.
	///
	/// Enable or disable the universe (and Poe, etc.), returning `true` if
	/// different than the previous state.
	pub(crate) fn set_active(v: bool) -> bool {
		// A still-live State counts as "active" too; no-op if nothing would
		// change.
		if v == (0!= FLAGS.load(SeqCst) & (Self::ACTIVE | Self::STATE)) { false }
		else {
			if v {
				// Set active flag.
				FLAGS.fetch_or(Self::ACTIVE, SeqCst);
				// Seed future randomness if we can.
				#[cfg(target_arch = "wasm32")] reseed();
				// Set up the DOM elements and event bindings, and begin the
				// animation frame loop.
				State::init();
			}
			else {
				// Clear everything but the audio, focus, and state properties.
				// (State will clear itself in a moment, hopefully.)
				FLAGS.fetch_and(Self::AUDIO | Self::NO_FOCUS | Self::STATE, SeqCst);
			}
			true
		}
	}
	/// # Set Assign Child Flag.
	///
	/// This will also remove the incompatible no-child flag.
	pub(crate) fn set_assign_child() {
		// `fetch_or` returns the *previous* value; only clear NO_CHILD if it
		// was actually set.
		if Self::NO_CHILD == FLAGS.fetch_or(Self::ASSIGN_CHILD, SeqCst) & Self::NO_CHILD {
			FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
		}
	}
	#[cfg(feature = "firefox")]
	/// # Require Element Re-Binding.
	pub(crate) fn set_fix_bindings() {
		if Self::active() { FLAGS.fetch_or(Self::FIX_BINDINGS, SeqCst); }
	}
	/// # Set No Child Flag.
	///
	/// This will also remove the incompatible assign-child flag.
	pub(crate) fn set_no_child() {
		if Self::ASSIGN_CHILD == FLAGS.fetch_or(Self::NO_CHILD, SeqCst) & Self::ASSIGN_CHILD {
			FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
		}
	}
	/// # Set No Focus.
	///
	/// If true, this will also disable dragging, since there wouldn't be
	/// any way to undrag.
	pub(crate) fn set_no_focus(v: bool) {
		if v {
			// Only clear DRAGGING if it was set before this call.
			if Self::DRAGGING == FLAGS.fetch_or(Self::NO_FOCUS, SeqCst) & Self::DRAGGING {
				FLAGS.fetch_and(! Self::DRAGGING, SeqCst);
			}
		}
		else { FLAGS.fetch_and(! Self::NO_FOCUS, SeqCst); }
	}
	/// # Set Position.
	///
	/// Update the cached X/Y mouse coordinates, only used when dragging a
	/// Poe around the screen.
	pub(crate) fn set_pos(x: i32, y: i32) {
		// Center the tile under the cursor before packing X (low) and Y
		// (high) little-endian into one u64. (Unpacked by `pos`.)
		let half_tile = Frame::SIZE_I.saturating_div(2);
		let x = x.saturating_sub(half_tile).to_le_bytes();
		let y = y.saturating_sub(half_tile).to_le_bytes();
		let pos = u64::from_le_bytes([
			x[0], x[1], x[2], x[3],
			y[0], y[1], y[2], y[3],
		]);
		POS.store(pos, SeqCst);
	}
	/// # Set Width/Height.
	///
	/// This updates the cached window dimensions.
	pub(crate) fn set_size(width: u16, height: u16) {
		// Width in the low two bytes, height in the high two. (Unpacked by
		// `size`.)
		let width = width.to_le_bytes();
		let height = height.to_le_bytes();
		SIZE.store(u32::from_le_bytes([width[0], width[1], height[0], height[1]]), SeqCst);
	}
}
#[cfg(feature = "director")]
impl Universe {
	/// # Speed.
	///
	/// Returns the current playback speed if other than "normal" (100%) or
	/// paused (0%); those two cases report `None`.
	pub(crate) fn speed() -> Option<f32> {
		let speed = SPEED.load(SeqCst);
		if speed == 0 || speed == 100 { None }
		else { Some(f32::from(speed) / 100.0) }
	}
	#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
	/// # Set Speed.
	///
	/// Change the animation playback speed, where `1.0` is normal. `NaN`
	/// resets to normal; everything else is clamped to `0..=10x`.
	pub(crate) fn set_speed(speed: f32) {
		// Clamp the range to something sane.
		let speed =
			if speed.is_nan() { 100.0 }
			else { (speed * 100.0).clamp(0.0, 1000.0) };
		// Store as an integer percentage.
		SPEED.store(speed as u16, SeqCst);
		// The enclosing impl is already gated on the "director" feature, so
		// no extra cfg attribute is needed for this debug message.
		dom::console_debug(&format!(
			"Playback Speed: {speed:.2}%"
		));
	}
	/// # Browserland Next Animation.
	///
	/// This returns (and clears) the animation set by `Poe.play`, if any and
	/// if playable.
	pub(crate) fn next_animation() -> Option<Animation> {
		Animation::from_u8(NEXT_ANIMATION.swap(0, SeqCst)).filter(|a| a.playable())
	}
	/// # Set Browserland Next Animation.
	///
	/// `Poe.play` uses this to manually override the primary mate's current
	/// animation.
	pub(crate) fn set_next_animation(next: u8) {
		NEXT_ANIMATION.store(next, SeqCst);
	}
}
#[inline]
/// # Get Seeds.
///
/// Snapshot all four xoshiro256 state words from their atomics.
fn get_seeds() -> [u64; 4] {
	let a = SEED1.load(SeqCst);
	let b = SEED2.load(SeqCst);
	let c = SEED3.load(SeqCst);
	let d = SEED4.load(SeqCst);
	[a, b, c, d]
}
#[inline]
/// # High 16 Product.
///
/// The upper half of the 32-bit product of two `u16` values.
const fn mul_high_u16(a: u16, b: u16) -> u16 {
	let wide = (a as u32) * (b as u32);
	(wide >> 16) as u16
}
#[cfg(target_arch = "wasm32")]
/// # Reseed Randomness.
///
/// Derive fresh xoshiro256 state from a single `Math.random` sample, run
/// through four rounds of splitmix64 to spread its entropy.
fn reseed() {
	// Splitmix Math.random to give us a reasonable starting point for the
	// subsequent Xoshi randomness.
	let mut seed: u64 = js_random().to_bits();
	let mut seeds = [0_u64; 4];
	for i in &mut seeds { *i = splitmix(&mut seed); }
	set_seeds(&seeds);
	// Print a debug message if we care about that sort of thing.
	#[cfg(feature = "director")]
	dom::console_debug(&format!(
		"PNRG1: {:016x}\nPNRG2: {:016x}\nPNRG3: {:016x}\nPNRG4: {:016x}",
		seeds[0],
		seeds[1],
		seeds[2],
		seeds[3],
	));
}
/// # Set Seeds.
///
/// Persist the four xoshiro256 state words, substituting the compile-time
/// defaults if every word is zero (an all-zero state would wedge the PRNG).
fn set_seeds(seeds: &[u64; 4]) {
	let fallback = [
		0x8596_cc44_bef0_1aa0,
		0x98d4_0948_da60_19ae,
		0x49f1_3013_c503_a6aa,
		0xc4d7_82ff_3c9f_7bef,
	];
	let chosen = if seeds.iter().all(|&s| s == 0) { &fallback } else { seeds };
	SEED1.store(chosen[0], SeqCst);
	SEED2.store(chosen[1], SeqCst);
	SEED3.store(chosen[2], SeqCst);
	SEED4.store(chosen[3], SeqCst);
}
/// # Update Seeds.
///
/// Advance the xoshiro256 state by one step (the standard xor/shift/rotate
/// scramble).
fn update_seeds(seeds: &mut[u64; 4]) {
	let [mut a, mut b, mut c, mut d] = *seeds;
	let t = b << 17;
	c ^= a;
	d ^= b;
	b ^= c;
	a ^= d;
	c ^= t;
	d = d.rotate_left(45);
	*seeds = [a, b, c, d];
}
#[cfg(target_arch = "wasm32")]
/// # Split/Mix.
///
/// This is used to generate our Xoshi256 seeds from a single source `u64`.
///
/// The source seed is advanced in place; each call yields the next mixed
/// value (splitmix64).
fn splitmix(seed: &mut u64) -> u64 {
	// Update the source seed. `wrapping_add` is the idiomatic spelling of
	// `overflowing_add(…).0` when the overflow flag is unused.
	*seed = seed.wrapping_add(0x9e37_79b9_7f4a_7c15);
	// Calculate and return a random value.
	let mut z: u64 = (*seed ^ (*seed >> 30)).wrapping_mul(0xbf58_476d_1ce4_e5b9);
	z = (z ^ (z >> 27)).wrapping_mul(0x94d0_49bb_1331_11eb);
	z ^ (z >> 31)
}
#[cfg(test)]
mod tests {
	use super::*;
	use std::collections::HashSet;
	#[test]
	/// Sanity-check the bounded random helper: zero in gives zero out, and
	/// 5000 draws of `rand_mod(100)` stay in range and cover all 100 values.
	fn t_rand() {
		assert_eq!(Universe::rand_mod(0), 0, "Random zero broke!");
		// A `Range` is already an iterator; the old `.into_iter()` was a
		// needless no-op conversion.
		let set = (0..5000_u16)
			.map(|_| Universe::rand_mod(100))
			.collect::<HashSet<u16>>();
		assert!(set.iter().all(|n| *n < 100), "Value(s) out of range.");
		assert_eq!(
			set.len(),
			100,
			"Failed to collect 100/100 possibilities in 5000 tries."
		);
	}
}
| gn_child() - | identifier_name |
universe.rs | /*!
# RS Mate Poe: Universe
*/
use crate::{
Frame,
Position,
State,
};
#[cfg(feature = "director")] use crate::{Animation, dom};
use std::sync::atomic::{
AtomicU8,
AtomicU32,
AtomicU64,
Ordering::SeqCst,
};
#[cfg(feature = "director")] use std::sync::atomic::AtomicU16;
#[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
#[allow(unsafe_code)]
#[wasm_bindgen(js_namespace = Math, js_name = "random")]
/// # Math.random.
///
/// This is the only `js_sys` thing we'd be using, so may as well handle
/// the import manually.
fn js_random() -> f64;
}
/// # Flags.
///
/// This holds a few basic bitflag runtime settings. (See the constants defined
/// on the [`Universe`] below.)
static FLAGS: AtomicU8 = AtomicU8::new(Universe::AUDIO);
#[cfg(feature = "director")]
/// # Next Animation.
///
/// This holds the `u8` equivalent of an animation requested from Browserland,
/// if any. Because those begin at `1`, zero is equivalent to none.
static NEXT_ANIMATION: AtomicU8 = AtomicU8::new(0);
/// # Mouse Coordinates.
///
/// This holds the (x, y) mouse coordinates captured while dragging.
///
/// The logical values for each are `i32`, but because they're always accessed
/// or updated as a pair, they're stored within a single 64-bit atomic.
static POS: AtomicU64 = AtomicU64::new(0);
/// # Xoshi Seed #1.
static SEED1: AtomicU64 = AtomicU64::new(0x8596_cc44_bef0_1aa0);
/// # Xoshi Seed #2.
static SEED2: AtomicU64 = AtomicU64::new(0x98d4_0948_da60_19ae);
/// # Xoshi Seed #3.
static SEED3: AtomicU64 = AtomicU64::new(0x49f1_3013_c503_a6aa);
/// # Xoshi Seed #4.
static SEED4: AtomicU64 = AtomicU64::new(0xc4d7_82ff_3c9f_7bef);
#[cfg(feature = "director")]
/// # Speed.
///
/// This holds the playback speed as an integer percentage in the range of
/// `0..=1000`, where `100` is normal.
static SPEED: AtomicU16 = AtomicU16::new(100);
/// # Screen Width and Height.
///
/// The dimensions are both `u16`, stored together because they're only ever
/// accessed/updated as a pair.
static SIZE: AtomicU32 = AtomicU32::new(0);
#[derive(Debug, Clone, Copy)]
/// # Universe.
///
/// This struct holds global settings that can be statically accessed from
/// anywhere within the application, mimicking Javascript's slutty inheritance
/// model, but safely because all of the data is atomic.
///
/// The [`Poe`](crate::Poe) struct exposes some of these settings — start/stop,
/// audio, speed — to the end user, but the rest are fully closed off.
pub(crate) struct Universe;
impl Universe {
	// Each constant is a distinct bit within the global `FLAGS` atomic, so
	// they can be freely combined with bitwise OR.
	const ACTIVE: u8 = 0b0000_0001; // Poe is active.
	const AUDIO: u8 = 0b0000_0010; // Audio is enabled.
	const DRAGGING: u8 = 0b0000_0100; // Poe is currently being dragged.
	const ASSIGN_CHILD: u8 = 0b0000_1000; // The primary mate needs a child animation.
	const NO_CHILD: u8 = 0b0001_0000; // Children must be stopped!
	const NO_FOCUS: u8 = 0b0010_0000; // Disable primary mate focus support.
	const STATE: u8 = 0b0100_0000; // State is active.
	#[cfg(feature = "firefox")]
	const FIX_BINDINGS: u8 = 0b1000_0000; // Body element bindings were lost.
}
macro_rules! get {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Is ", $title, "?")]
#[inline]
pub(crate) fn $fn() -> bool {
Self::$flag == FLAGS.load(SeqCst) & Self::$flag
}
);
}
impl Universe {
get!("Active", ACTIVE, active);
get!("Audio Enabled", AUDIO, audio);
get!("Dragging", DRAGGING, dragging);
get!("No Focus Allowed", NO_FOCUS, no_focus);
/// # Assign Child Animation?
///
/// Returns `true` if the previous mate requested a new child since the
/// last time this method was called.
pub(crate) fn assign_child() -> bool {
let old = FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
Self::ASSIGN_CHILD == old & Self::ASSIGN_CHILD
}
#[cfg(feature = "firefox")]
/// # Fix Element Bindings?
///
/// Returns `true` if one or both elements seem to have disappeared from
/// the document body since the last time this method was called.
pub(crate) fn fix_bindings() -> bool {
let old = FLAGS.fetch_and(! Self::FIX_BINDINGS, SeqCst);
let expected = Self::FIX_BINDINGS | Self::ACTIVE;
expected == old & expected
}
/// # Stop Child Animations?
///
/// Returns `true` if the previous mate requested the end to childhood.
pub(crate) fn no_child() -> bool {
let old = FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
Self::NO_CHILD == old & Self::NO_CHILD
}
#[cfg(feature = "director")]
#[inline]
/// # Are We Paused?
pub(crate) fn paused() -> bool { SPEED.load(SeqCst) == 0 }
#[cfg(not(feature = "director"))]
/// # We Aren't Paused.
pub(crate) const fn paused() -> bool { false }
/// # Position.
///
/// The current — or last recorded — X/Y position of the mouse on the
/// screen.
///
/// This information is only captured when the primary Poe mate is being
/// dragged, so will otherwise grow stale.
pub(crate) fn pos() -> Position {
let pos = POS.load(SeqCst).to_le_bytes();
let x = i32::from_le_bytes([pos[0], pos[1], pos[2], pos[3]]);
let y = i32::from_le_bytes([pos[4], pos[5], pos[6], pos[7]]);
Position::new(x, y)
}
/// # Width/Height.
///
/// Returns the current — or last recorded — dimensions of the screen.
///
/// These are captured when the universe is first initialized and refreshed
/// whenever the window is resized, but will grow stale when Poe has been
/// de-activated.
pub(crate) fn size() -> (u16, u16) {
let size = SIZE.load(SeqCst).to_le_bytes();
let width = u16::from_le_bytes([size[0], size[1]]);
let height = u16::from_le_bytes([size[2], size[3]]);
match (width, height) {
(0, 0) => (1, 1),
(0, h) => (1, h),
(w, 0) => (w, 1),
(w, h) => (w, h),
}
}
}
impl Universe {
#[inline]
/// # Random Value.
///
/// Return a random `u64` (xoshiro256).
pub(crate) fn rand() -> u64 {
let mut seeds = get_seeds();
let out = seeds[1].overflowing_mul(5).0.rotate_left(7).overflowing_mul(9).0;
update_seeds(&mut seeds);
set_seeds(&seeds);
out
}
#[allow(clippy::cast_possible_truncation)]
/// # Random (Capped) U16.
///
/// Return a random number between `0..max`, mitigating bias the same way
/// as `fastrand` (i.e. <https://lemire.me/blog/2016/06/30/fast-random-shuffling/>).
pub(crate) fn rand_mod(n: u16) -> u16 {
let mut r = Self::rand() as u16;
let mut hi = mul_high_u16(r, n);
let mut lo = r.wrapping_mul(n);
if lo < n {
let t = n.wrapping_neg() % n;
while lo < t {
r = Self::rand() as u16;
hi = mul_high_u16(r, n);
lo = r.wrapping_mul(n);
}
}
hi
}
}
macro_rules! set {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Set ", $title, ".")]
pub(crate) fn $fn(v: bool) {
if v { FLAGS.fetch_or(Self::$flag, SeqCst); }
else { FLAGS.fetch_and(! Self::$flag, SeqCst); }
}
);
}
impl Universe {
set!("Allow Audio", AUDIO, set_audio);
set!("Dragging", DRAGGING, set_dragging);
set!("State", STATE, set_state);
/// # Set Active.
///
/// Enable or disable the universe (and Poe, etc.), returning `true` if
/// different than the previous state.
pub(crate) fn set_active(v: bool) -> bool {
if v == (0!= FLAGS.load(SeqCst) & (Self::ACTIVE | Self::STATE)) { false }
else {
if v {
// Set active flag.
FLAGS.fetch_or(Self::ACTIVE, SeqCst);
// Seed future randomness if we can.
#[cfg(target_arch = "wasm32")] reseed();
// Set up the DOM elements and event bindings, and begin the
// animation frame loop.
State::init();
}
else {
// Clear everything but the audio, focus, and state properties.
// (State will clear itself in a moment, hopefully.)
FLAGS.fetch_and(Self::AUDIO | Self::NO_FOCUS | Self::STATE, SeqCst);
}
true
}
}
/// # Set Assign Child Flag.
///
/// This will also remove the incompatible no-child flag.
pub(crate) fn set_assign_child() {
if Self::NO_CHILD == FLAGS.fetch_or(Self::ASSIGN_CHILD, SeqCst) & Self::NO_CHILD {
FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
}
}
#[cfg(feature = "firefox")]
/// # Require Element Re-Binding.
pub(crate) fn set_fix_bindings() {
if Self::active() { FLAGS.fetch_or(Self::FIX_BINDINGS, SeqCst); }
}
/// # Set No Child Flag.
///
/// This will also remove the incompatible assign-child flag.
pub(crate) fn set_no_child() {
if Self::ASSIGN_CHILD == FLAGS.fetch_or(Self::NO_CHILD, SeqCst) & Self::ASSIGN_CHILD {
FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
}
}
/// # Set No Focus.
///
/// If true, this will also disable dragging, since there wouldn't be
/// any way to undrag.
pub(crate) fn set_no_focus(v: bool) {
if v {
if Self::DRAGGING == FLAGS.fetch_or(Self::NO_FOCUS, SeqCst) & Self::DRAGGING {
FLAGS.fetch_and(! Self::DRAGGING, SeqCst);
}
}
else { FLAGS.fetch_and(! Self::NO_FOCUS, SeqCst); }
}
/// # Set Position.
///
/// Update the cached X/Y mouse coordinates, only used when dragging a
/// Poe around the screen.
pub(crate) fn set_pos(x: i32, y: i32) {
let half_tile = Frame::SIZE_I.saturating_div(2);
let x = x.saturating_sub(half_tile).to_le_bytes();
let y = y.saturating_sub(half_tile).to_le_bytes();
let pos = u64::from_le_bytes([
x[0], x[1], x[2], x[3],
y[0], y[1], y[2], y[3],
]);
POS.store(pos, SeqCst);
}
/// # Set Width/Height.
///
/// This updates the cached window dimensions.
pub(crate) fn set_size(width: u16, height: u16) {
let width = width.to_le_bytes();
let height = height.to_le_bytes();
SIZE.store(u32::from_le_bytes([width[0], width[1], height[0], height[1]]), SeqCst);
}
}
#[cfg(feature = "director")]
impl Universe {
/// # Speed.
///
/// Returns the current playback speed if other than "normal" or paused.
pub(crate) fn speed() -> Option<f32> {
let speed = SPEED.load(SeqCst);
if speed == 0 || speed == 100 { None }
else { Some(f32::from(speed) / 100.0) }
}
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
/// # Set Speed.
///
/// Change the animation playback speed.
pub(crate) fn set_speed(speed: f32) {
// Clamp the range to something sane.
let speed =
if speed.is_nan() { 100.0 }
else { (speed * 100.0).clamp(0.0, 1000.0) };
// Store as an integer.
SPEED.store(speed as u16, SeqCst);
#[cfg(feature = "director")] dom::console_debug(&format!(
"Playback Speed: {speed:.2}%"
));
}
/// # Browserland Next Animation.
///
/// This returns (and clears) the animation set by `Poe.play`, if any.
pub(crate) fn next_animation() -> Option<Animation> {
Animation::from_u8(NEXT_ANIMATION.swap(0, SeqCst)).filter(|a| a.playable())
}
/// # Set Browserland Next Animation.
///
/// `Poe.play` uses this to manually override the primary mate's current
/// animation.
pub(crate) fn set_next_animation(next: u8) {
NEXT_ANIMATION.store(next, SeqCst);
}
}
#[inline]
/// # Get Seeds.
fn get_seeds() -> [u64; 4] {
[
SEED1.load(SeqCst),
SEED2.load(SeqCst),
SEED3.load(SeqCst),
SEED4.load(SeqCst),
]
}
#[inline]
/// # High 16 Product.
///
/// The upper half of the 32-bit product of two `u16` values.
const fn mul_high_u16(a: u16, b: u16) -> u16 {
	let wide = (a as u32) * (b as u32);
	(wide >> 16) as u16
}
#[cfg(target_arch = "wasm32")]
/// # Reseed Randomness.
fn reseed() {
// Splitmix Math.random to give us a reasonable starting point for the
// subsequent Xoshi randomness.
let mut seed: u64 = js_random().to_bits();
let mut seeds = [0_u64; 4];
for i in &mut seeds { *i = splitmix(&mut seed); }
set_seeds(&seeds);
// Print a debug message if we care about that sort of thing.
#[cfg(feature = "director")]
dom::console_debug(&format!(
"PNRG1: {:016x}\nPNRG2: {:016x}\nPNRG3: {:016x}\nPNRG4: {:016x}",
seeds[0],
seeds[1],
seeds[2],
seeds[3],
));
}
/// # Set Seeds.
fn set_seeds(seeds: &[u64; 4]) {
// We are unlikely to wind up with all zeroes, but just in case…
if seeds[0] == 0 && seeds[1] == 0 && seeds[2] == 0 && seeds[3] == 0 {
SEED1.store(0x8596_cc44_bef0_1aa0, SeqCst);
SEED2.store(0x98d4_0948_da60_19ae, SeqCst);
SEED3.store(0x49f1_3013_c503_a6aa, SeqCst);
SEED4.store(0xc4d7_82ff_3c9f_7bef, SeqCst);
}
else {
SEED1.store(seeds[0], SeqCst);
SEED2.store(seeds[1], SeqCst);
SEED3.store(seeds[2], SeqCst);
SEED4.store(seeds[3], SeqCst);
}
}
/// # Update Seeds.
///
/// Advance the xoshiro256 state by one step (the standard xor/shift/rotate
/// scramble).
fn update_seeds(seeds: &mut[u64; 4]) {
	let [mut a, mut b, mut c, mut d] = *seeds;
	let t = b << 17;
	c ^= a;
	d ^= b;
	b ^= c;
	a ^= d;
	c ^= t;
	d = d.rotate_left(45);
	*seeds = [a, b, c, d];
}
#[cfg(target_arch = "wasm32")]
/// # Split/Mix.
///
/// This is used to generate our Xoshi256 seeds from a single source `u64`.
///
/// The source seed is advanced in place; each call yields the next mixed
/// value (splitmix64).
fn splitmix(seed: &mut u64) -> u64 {
	// Update the source seed. `wrapping_add` is the idiomatic spelling of
	// `overflowing_add(…).0` when the overflow flag is unused.
	*seed = seed.wrapping_add(0x9e37_79b9_7f4a_7c15);
	// Calculate and return a random value.
	let mut z: u64 = (*seed ^ (*seed >> 30)).wrapping_mul(0xbf58_476d_1ce4_e5b9);
	z = (z ^ (z >> 27)).wrapping_mul(0x94d0_49bb_1331_11eb);
	z ^ (z >> 31)
}
#[cfg(test)]
mod tests { | assert_eq!(Universe::rand_mod(0), 0, "Random zero broke!");
let set = (0..5000_u16).into_iter()
.map(|_| Universe::rand_mod(100))
.collect::<HashSet<u16>>();
assert!(set.iter().all(|n| *n < 100), "Value(s) out of range.");
assert_eq!(
set.len(),
100,
"Failed to collect 100/100 possibilities in 5000 tries."
);
}
} | use super::*;
use std::collections::HashSet;
#[test]
fn t_rand() { | random_line_split |
lib.rs | //working off example (spi): https://github.com/japaric/mfrc522/blob/master/src/lib.rs
//another example: https://github.com/JohnDoneth/hd44780-driver/blob/master/examples/raspberrypi/src/main.rs
//#![no_std] //FIXME TODO remove all std lib dependencies
extern crate embedded_hal as hal;
#[macro_use]
extern crate bitflags;
use core::num::NonZeroU8;
use core::cmp::min;
use hal::blocking::spi;
use hal::digital::OutputPin;
use hal::spi::{Mode, Phase, Polarity};
use hal::blocking::delay::{DelayMs, DelayUs};
mod registers;
use registers::{Register, RF69_FSTEP, FXOSC};
mod builder;
pub use builder::{RadioBuilder,radio};
/// RFM69 radio driver, generic over the SPI bus, chip-select pin, and delay
/// provider supplied by the embedded-hal implementation.
pub struct Radio<SPI, CS, DELAY> {
    spi: SPI, // SPI bus the radio is wired to
    cs: CS, // chip-select pin (NOTE(review): currently unused — all set_low/set_high calls are commented out)
    delay: DELAY, // blocking delay source for inter-transaction settling
    freq: u32, // carrier frequency in Hz
    bitrate: Bitrate, //optional (default = smthg)
    power_level: u8, //optional (default, max)
    network_filtering: Option<NonZeroU8>, // sync-word network id, if any
    adress_filtering: AddressFiltering, // node/broadcast address filtering mode
    encryption_key: Option<[u8;17]>, // AES key; byte 0 is reserved for the register address, bytes 1..=16 are the key
    mode: RadioMode, // last transceiver mode we commanded
    package_len: PackageLength, // fixed vs. variable packet length config
    register_flags: RegisterFlags // cached register values to avoid SPI reads
}
//local copy of register flags to save register read operations
struct RegisterFlags {
mode: registers::OpMode,
sync: registers::SyncConfig,
config1: registers::PacketConfig1,
config2: registers::PacketConfig2,
pa_level: registers::PaLevel,
}
impl Default for RegisterFlags {
fn default() -> Self {
Self {
mode: registers::OpMode::Standby
&!registers::OpMode::Sequencer_Off
&!registers::OpMode::Listen_On,
sync: registers::SyncConfig::On
| registers::SyncConfig::Fifofill_Auto
| registers::SyncConfig::Size_2
| registers::SyncConfig::Tol_0,
config1: registers::PacketConfig1::Format_Variable
| registers::PacketConfig1::Dcfree_Off
| registers::PacketConfig1::Crc_On
| registers::PacketConfig1::Crcautoclear_On
| registers::PacketConfig1::Adrsfiltering_Off,
config2: registers::PacketConfig2::Rxrestartdelay_2bits
&!registers::PacketConfig2::Aes_On
| registers::PacketConfig2::Autorxrestart_On,
pa_level: registers::PaLevel::Pa0_On
&!registers::PaLevel::Pa1_On
&!registers::PaLevel::Pa2_On,
}
}
}
#[allow(dead_code)]
enum AddressFiltering {
None,
AddressOnly(u8),
AddressOrBroadcast((u8,u8)), //(addr, broadcast_addr)
}
#[allow(dead_code)]
#[derive(Debug, PartialEq, Clone, Copy)]
enum RadioMode { //rename transeiver?
Sleep = 0, // Xtal Off
Standby = 1, // Xtal On
FreqSynth = 2, // Pll On
Rx = 3, // Rx Mode
Tx = 4, // Tx Mode
}
impl Default for RadioMode {
fn default() -> Self {
RadioMode::Standby
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum Bitrate {
Lowest,
Low,
Standard,
High,
Custom(u32),
}
impl Default for Bitrate {
fn default() -> Self {
Bitrate::Standard
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum PackageLength {
Fixed(u8), //in bytes
Max(u8),
}
impl Default for PackageLength {
fn default() -> Self {
PackageLength::Fixed(16)
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum FreqencyBand {
ISM315mhz,
ISM433mhz,
ISM868mhz,
ISM915mhz,
}
/// SPI mode
pub const SPI_MODE: Mode = Mode {
phase: Phase::CaptureOnFirstTransition,
polarity: Polarity::IdleLow,
};
pub const SPI_SPEED: u32 = 500_000;
impl<SPI,CS, D, E> Radio<SPI, CS, D>
where SPI: spi::Transfer<u8, Error = E> + spi::Write<u8, Error = E>,
D: DelayMs<u16>+DelayUs<u16>,
CS: OutputPin,
E: core::fmt::Debug {
fn configure_radio(&mut self) -> Result<(),&'static str> {
self.set_default_config();
self.set_package_filtering();
self.set_bitrate();
self.set_frequency()?;
self.set_payload_length();
self.set_power_level();
self.set_encryption_key();
Ok(())
}
pub fn | (&mut self) -> Result<(),&'static str> {
//self.cs.set_high();
//check if the radio responds by seeing if we can change a register
let mut synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0xAA); //170
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0xAA {
synced = true;
break;
}
}
if!synced {return Err("could not communicate with radio")}
synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0x55); //85
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0x55 {
synced = true;
break;
}
}
if!synced {return Err("could not communicate with radio")}
//configure the radio chips for normal use
self.configure_radio()?;
Ok(())
}
// To enable encryption: radio.encrypt("ABCDEFGHIJKLMNOP");
// To disable encryption: radio.encrypt(null) or radio.encrypt(0)
// KEY HAS TO BE 16 bytes!!!
fn set_encryption_key(&mut self) -> Result<(),&'static str> {
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
match self.encryption_key {
None =>
self.register_flags.config2 &=!registers::PacketConfig2::Aes_On, //set aes off
Some(mut key) => {
self.register_flags.config2 |= registers::PacketConfig2::Aes_On; //set aes on
key[0] = Register::Aeskey1.write_address();
self.spi.write(&key).unwrap();
},
}
self.delay.delay_us(15u16);
self.write_reg(Register::Packetconfig2, self.register_flags.config2.bits());
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_power_level(&mut self) {
use crate::registers::PaLevel;
self.register_flags.pa_level -= PaLevel::Power;
self.register_flags.pa_level |= PaLevel::from_bits(self.power_level).unwrap_or(PaLevel::Power);
self.write_reg(Register::Palevel, self.register_flags.pa_level.bits());
}
fn await_interrupt_flag(&mut self, register: Register, flag: registers::IrqFlags2) -> Result<(),&'static str> {
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = registers::IrqFlags2::from_bits(self.read_reg(register)).unwrap();
if interrupt_flag.contains(flag){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("interrupt flag was not set within timeout")
}
pub fn send_blocking(&mut self, adress: u8, buffer: &[u8]) -> Result<(),&'static str> {
use crate::registers::DioMapping1;
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
//setup the interrupt pin so an interrupt wil fire once the packet has been send
self.write_reg(Register::Diomapping1, DioMapping1::Dio0_00.bits()); //in tx mode Dio0_00: packet sent
let return_adress = match self.adress_filtering {
AddressFiltering::None => {
0
},
AddressFiltering::AddressOnly(node_addr) => {
node_addr
},
AddressFiltering::AddressOrBroadcast((node_addr,_broadcast_addr)) => {
node_addr
},
};
//spiXfer(spi_handle, (char*)rawDATA, (char*)rawDATA, bufferSize + 5 );
let mut packet = [0u8; registers::MAX_PACKET_SIZE+3];
let send_len = min(buffer.len() + 3, registers::MAX_PACKET_SIZE);
packet[0] = Register::Fifo.write_address();
packet[1] = send_len as u8;
packet[2] = adress; //1
packet[3] = return_adress; //2
packet[4] = 0;//reserved; //3
packet[5..5+buffer.len()].clone_from_slice(buffer);
//self.cs.set_low();
self.spi.write(&packet[..5+buffer.len()]).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
// no need to wait for transmit mode to be ready since its handled by the radio
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
self.await_interrupt_flag(Register::Irqflags2, registers::IrqFlags2::Packetsent)?;
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_payload_length(&mut self){
match self.package_len {
PackageLength::Fixed(len) => {
self.register_flags.config1 -= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
PackageLength::Max(len) => {
self.register_flags.config1 |= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
}
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
}
fn set_default_config(&mut self) {
for (register, bitflag) in registers::DEFAULT_RADIO_CONFIG.iter() {
self.write_reg(*register, *bitflag);
}
}
fn set_package_filtering(&mut self) {
use registers::SyncConfig;
use registers::PacketConfig1;
match self.network_filtering {
None => {//switch to one sync word (second one is used as network id)
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_1;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
},
Some(network_id) => {
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_2;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
self.write_reg(Register::Syncvalue2, network_id.get());
},
}
self.register_flags.config1 -= PacketConfig1::Adrsfiltering;
match self.adress_filtering {
AddressFiltering::None => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Off;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
},
AddressFiltering::AddressOnly(node_addr) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Node;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
},
AddressFiltering::AddressOrBroadcast((node_addr,broadcast_addr)) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Nodebroadcast;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
self.write_reg(Register::Broadcastadrs, broadcast_addr);
},
}
}
fn set_bitrate(&mut self) {
//bitrate reg value: F_xosc / bitrate (b/s)
match self.bitrate {
Bitrate::Lowest => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_1200.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_1200.bits());
},
Bitrate::Low => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_55555.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_55555.bits());
},
Bitrate::High => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_200kbps.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_200kbps.bits());
},
Bitrate::Standard => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_100000.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_100000.bits());
},
Bitrate::Custom(bitrate) => {
let msb = (FXOSC/bitrate >> 8) as u8;
let lsb = (FXOSC/bitrate) as u8;
self.write_reg(Register::Bitratemsb, msb);
self.write_reg(Register::Bitratelsb, lsb);
},
}
}
fn switch_freq(&mut self) -> Result<(),&'static str> {
let frf = (self.freq as f32 / RF69_FSTEP) as u32; // divide down by FSTEP to get FRF
if self.mode == RadioMode::Tx {
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
} else {
let old_mode = self.mode;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::FreqSynth)?;
self.switch_transeiver_mode_blocking(old_mode)?;
}
Ok(())
}
//see page 38 in the datasheet,
//TODO research Fdev and do that too
fn set_frequency(&mut self) -> Result<(),&'static str> {
if!self.register_flags.mode.contains(registers::OpMode::Sequencer_Off) {
self.register_flags.mode |= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.switch_freq()?;
self.register_flags.mode -= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
} else {
self.switch_freq()?;
}
Ok(())
}
fn switch_transceiver_mode(&mut self, new_mode: RadioMode) {
use registers::OpMode;
let old_flag = self.register_flags.mode - OpMode::Mode;
self.register_flags.mode = match new_mode {
RadioMode::Sleep => old_flag | OpMode::Sleep, // Xtal Off
RadioMode::Standby => old_flag | OpMode::Standby, // Xtal On
RadioMode::FreqSynth => old_flag | OpMode::Synthesizer, // Pll On
RadioMode::Rx => old_flag | OpMode::Receiver, // Rx Mode
RadioMode::Tx => old_flag | OpMode::Transmitter, // Tx Mode
};
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.mode = new_mode;
}
fn switch_transeiver_mode_blocking(&mut self, new_mode: RadioMode) -> Result<(),&'static str>{
use registers::IrqFlags1;
self.switch_transceiver_mode(new_mode);
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = IrqFlags1::from_bits(self.read_reg(Register::Irqflags1)).unwrap();
if interrupt_flag.contains(IrqFlags1::Modeready){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("transiever did not switch within timeout")
}
fn write_reg(&mut self, addr: Register, value: u8) {
let to_write: [u8; 2] = [addr.write_address(), value];
//self.cs.set_low();
self.spi.write(&to_write).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
}
fn read_reg(&mut self, addr: Register) -> u8{
let mut to_transfer: [u8; 2] = [addr.read_address(), 0];
//self.cs.set_low();
let to_transfer = self.spi.transfer(&mut to_transfer).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
let awnser = to_transfer[1];
awnser
}
}
| init | identifier_name |
lib.rs | //working off example (spi): https://github.com/japaric/mfrc522/blob/master/src/lib.rs
//another example: https://github.com/JohnDoneth/hd44780-driver/blob/master/examples/raspberrypi/src/main.rs
//#![no_std] //FIXME TODO remove all std lib dependencies
extern crate embedded_hal as hal;
#[macro_use]
extern crate bitflags;
use core::num::NonZeroU8;
use core::cmp::min;
use hal::blocking::spi;
use hal::digital::OutputPin;
use hal::spi::{Mode, Phase, Polarity};
use hal::blocking::delay::{DelayMs, DelayUs};
mod registers;
use registers::{Register, RF69_FSTEP, FXOSC};
mod builder;
pub use builder::{RadioBuilder,radio};
pub struct Radio<SPI, CS, DELAY> {
spi: SPI,
cs: CS,
delay: DELAY,
freq: u32,
bitrate: Bitrate, //optional (default = smthg)
power_level: u8, //optional (default, max)
network_filtering: Option<NonZeroU8>,
adress_filtering: AddressFiltering,
encryption_key: Option<[u8;17]>,
mode: RadioMode,
package_len: PackageLength,
register_flags: RegisterFlags
}
//local copy of register flags to save register read operations
struct RegisterFlags {
mode: registers::OpMode,
sync: registers::SyncConfig,
config1: registers::PacketConfig1,
config2: registers::PacketConfig2,
pa_level: registers::PaLevel,
}
impl Default for RegisterFlags {
fn default() -> Self {
Self {
mode: registers::OpMode::Standby
&!registers::OpMode::Sequencer_Off
&!registers::OpMode::Listen_On,
sync: registers::SyncConfig::On
| registers::SyncConfig::Fifofill_Auto
| registers::SyncConfig::Size_2
| registers::SyncConfig::Tol_0,
config1: registers::PacketConfig1::Format_Variable
| registers::PacketConfig1::Dcfree_Off
| registers::PacketConfig1::Crc_On
| registers::PacketConfig1::Crcautoclear_On
| registers::PacketConfig1::Adrsfiltering_Off,
config2: registers::PacketConfig2::Rxrestartdelay_2bits
&!registers::PacketConfig2::Aes_On
| registers::PacketConfig2::Autorxrestart_On,
pa_level: registers::PaLevel::Pa0_On
&!registers::PaLevel::Pa1_On
&!registers::PaLevel::Pa2_On,
}
}
}
#[allow(dead_code)]
enum AddressFiltering {
None,
AddressOnly(u8),
AddressOrBroadcast((u8,u8)), //(addr, broadcast_addr)
}
#[allow(dead_code)]
#[derive(Debug, PartialEq, Clone, Copy)]
enum RadioMode { //rename transeiver?
Sleep = 0, // Xtal Off
Standby = 1, // Xtal On
FreqSynth = 2, // Pll On
Rx = 3, // Rx Mode
Tx = 4, // Tx Mode
}
impl Default for RadioMode {
fn default() -> Self {
RadioMode::Standby
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum Bitrate {
Lowest,
Low,
Standard,
High,
Custom(u32),
}
impl Default for Bitrate {
fn default() -> Self {
Bitrate::Standard
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum PackageLength {
Fixed(u8), //in bytes
Max(u8),
}
impl Default for PackageLength {
fn default() -> Self {
PackageLength::Fixed(16)
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum FreqencyBand {
ISM315mhz,
ISM433mhz,
ISM868mhz,
ISM915mhz,
}
/// SPI mode
pub const SPI_MODE: Mode = Mode {
phase: Phase::CaptureOnFirstTransition,
polarity: Polarity::IdleLow,
};
pub const SPI_SPEED: u32 = 500_000;
impl<SPI,CS, D, E> Radio<SPI, CS, D>
where SPI: spi::Transfer<u8, Error = E> + spi::Write<u8, Error = E>,
D: DelayMs<u16>+DelayUs<u16>,
CS: OutputPin,
E: core::fmt::Debug {
fn configure_radio(&mut self) -> Result<(),&'static str> {
self.set_default_config();
self.set_package_filtering();
self.set_bitrate();
self.set_frequency()?;
self.set_payload_length();
self.set_power_level();
self.set_encryption_key();
Ok(())
}
pub fn init(&mut self) -> Result<(),&'static str> {
//self.cs.set_high();
//check if the radio responds by seeing if we can change a register
let mut synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0xAA); //170
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0xAA {
synced = true;
break;
}
}
if!synced {return Err("could not communicate with radio")}
synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0x55); //85
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0x55 {
synced = true;
break;
}
}
if!synced {return Err("could not communicate with radio")}
//configure the radio chips for normal use
self.configure_radio()?;
Ok(())
}
// To enable encryption: radio.encrypt("ABCDEFGHIJKLMNOP");
// To disable encryption: radio.encrypt(null) or radio.encrypt(0)
// KEY HAS TO BE 16 bytes!!!
fn set_encryption_key(&mut self) -> Result<(),&'static str> {
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
match self.encryption_key {
None =>
self.register_flags.config2 &=!registers::PacketConfig2::Aes_On, //set aes off
Some(mut key) => {
self.register_flags.config2 |= registers::PacketConfig2::Aes_On; //set aes on
key[0] = Register::Aeskey1.write_address();
self.spi.write(&key).unwrap();
},
}
self.delay.delay_us(15u16);
self.write_reg(Register::Packetconfig2, self.register_flags.config2.bits());
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_power_level(&mut self) {
use crate::registers::PaLevel;
self.register_flags.pa_level -= PaLevel::Power;
self.register_flags.pa_level |= PaLevel::from_bits(self.power_level).unwrap_or(PaLevel::Power);
self.write_reg(Register::Palevel, self.register_flags.pa_level.bits());
}
fn await_interrupt_flag(&mut self, register: Register, flag: registers::IrqFlags2) -> Result<(),&'static str> {
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = registers::IrqFlags2::from_bits(self.read_reg(register)).unwrap();
if interrupt_flag.contains(flag){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("interrupt flag was not set within timeout")
}
pub fn send_blocking(&mut self, adress: u8, buffer: &[u8]) -> Result<(),&'static str> {
use crate::registers::DioMapping1;
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
//setup the interrupt pin so an interrupt wil fire once the packet has been send
self.write_reg(Register::Diomapping1, DioMapping1::Dio0_00.bits()); //in tx mode Dio0_00: packet sent
let return_adress = match self.adress_filtering {
AddressFiltering::None => {
0
},
AddressFiltering::AddressOnly(node_addr) => {
node_addr
},
AddressFiltering::AddressOrBroadcast((node_addr,_broadcast_addr)) => {
node_addr
},
};
//spiXfer(spi_handle, (char*)rawDATA, (char*)rawDATA, bufferSize + 5 );
let mut packet = [0u8; registers::MAX_PACKET_SIZE+3];
let send_len = min(buffer.len() + 3, registers::MAX_PACKET_SIZE);
packet[0] = Register::Fifo.write_address();
packet[1] = send_len as u8;
packet[2] = adress; //1
packet[3] = return_adress; //2
packet[4] = 0;//reserved; //3
packet[5..5+buffer.len()].clone_from_slice(buffer);
//self.cs.set_low();
self.spi.write(&packet[..5+buffer.len()]).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
// no need to wait for transmit mode to be ready since its handled by the radio
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
self.await_interrupt_flag(Register::Irqflags2, registers::IrqFlags2::Packetsent)?;
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_payload_length(&mut self){
match self.package_len {
PackageLength::Fixed(len) => {
self.register_flags.config1 -= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
PackageLength::Max(len) => {
self.register_flags.config1 |= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
}
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
}
fn set_default_config(&mut self) {
for (register, bitflag) in registers::DEFAULT_RADIO_CONFIG.iter() {
self.write_reg(*register, *bitflag);
}
}
fn set_package_filtering(&mut self) {
use registers::SyncConfig;
use registers::PacketConfig1;
match self.network_filtering {
None => {//switch to one sync word (second one is used as network id)
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_1;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
},
Some(network_id) => {
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_2;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
self.write_reg(Register::Syncvalue2, network_id.get());
},
}
self.register_flags.config1 -= PacketConfig1::Adrsfiltering;
match self.adress_filtering {
AddressFiltering::None => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Off;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
},
AddressFiltering::AddressOnly(node_addr) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Node;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
},
AddressFiltering::AddressOrBroadcast((node_addr,broadcast_addr)) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Nodebroadcast;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
self.write_reg(Register::Broadcastadrs, broadcast_addr);
},
}
}
fn set_bitrate(&mut self) {
//bitrate reg value: F_xosc / bitrate (b/s)
match self.bitrate {
Bitrate::Lowest => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_1200.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_1200.bits());
},
Bitrate::Low => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_55555.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_55555.bits());
},
Bitrate::High => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_200kbps.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_200kbps.bits());
},
Bitrate::Standard => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_100000.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_100000.bits());
},
Bitrate::Custom(bitrate) => {
let msb = (FXOSC/bitrate >> 8) as u8;
let lsb = (FXOSC/bitrate) as u8;
self.write_reg(Register::Bitratemsb, msb);
self.write_reg(Register::Bitratelsb, lsb);
},
}
}
fn switch_freq(&mut self) -> Result<(),&'static str> {
let frf = (self.freq as f32 / RF69_FSTEP) as u32; // divide down by FSTEP to get FRF
if self.mode == RadioMode::Tx {
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
} else {
let old_mode = self.mode;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::FreqSynth)?;
self.switch_transeiver_mode_blocking(old_mode)?;
}
Ok(())
}
//see page 38 in the datasheet,
//TODO research Fdev and do that too
fn set_frequency(&mut self) -> Result<(),&'static str> |
fn switch_transceiver_mode(&mut self, new_mode: RadioMode) {
use registers::OpMode;
let old_flag = self.register_flags.mode - OpMode::Mode;
self.register_flags.mode = match new_mode {
RadioMode::Sleep => old_flag | OpMode::Sleep, // Xtal Off
RadioMode::Standby => old_flag | OpMode::Standby, // Xtal On
RadioMode::FreqSynth => old_flag | OpMode::Synthesizer, // Pll On
RadioMode::Rx => old_flag | OpMode::Receiver, // Rx Mode
RadioMode::Tx => old_flag | OpMode::Transmitter, // Tx Mode
};
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.mode = new_mode;
}
fn switch_transeiver_mode_blocking(&mut self, new_mode: RadioMode) -> Result<(),&'static str>{
use registers::IrqFlags1;
self.switch_transceiver_mode(new_mode);
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = IrqFlags1::from_bits(self.read_reg(Register::Irqflags1)).unwrap();
if interrupt_flag.contains(IrqFlags1::Modeready){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("transiever did not switch within timeout")
}
fn write_reg(&mut self, addr: Register, value: u8) {
let to_write: [u8; 2] = [addr.write_address(), value];
//self.cs.set_low();
self.spi.write(&to_write).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
}
fn read_reg(&mut self, addr: Register) -> u8{
let mut to_transfer: [u8; 2] = [addr.read_address(), 0];
//self.cs.set_low();
let to_transfer = self.spi.transfer(&mut to_transfer).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
let awnser = to_transfer[1];
awnser
}
}
| {
if !self.register_flags.mode.contains(registers::OpMode::Sequencer_Off) {
self.register_flags.mode |= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.switch_freq()?;
self.register_flags.mode -= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
} else {
self.switch_freq()?;
}
Ok(())
} | identifier_body |
lib.rs | //working off example (spi): https://github.com/japaric/mfrc522/blob/master/src/lib.rs
//another example: https://github.com/JohnDoneth/hd44780-driver/blob/master/examples/raspberrypi/src/main.rs
//#![no_std] //FIXME TODO remove all std lib dependencies
extern crate embedded_hal as hal;
#[macro_use]
extern crate bitflags;
use core::num::NonZeroU8;
use core::cmp::min;
use hal::blocking::spi;
use hal::digital::OutputPin;
use hal::spi::{Mode, Phase, Polarity};
use hal::blocking::delay::{DelayMs, DelayUs};
mod registers;
use registers::{Register, RF69_FSTEP, FXOSC};
mod builder;
pub use builder::{RadioBuilder,radio};
pub struct Radio<SPI, CS, DELAY> {
spi: SPI,
cs: CS,
delay: DELAY,
freq: u32,
bitrate: Bitrate, //optional (default = smthg)
power_level: u8, //optional (default, max)
network_filtering: Option<NonZeroU8>,
adress_filtering: AddressFiltering,
encryption_key: Option<[u8;17]>,
mode: RadioMode,
package_len: PackageLength,
register_flags: RegisterFlags
}
//local copy of register flags to save register read operations
struct RegisterFlags {
mode: registers::OpMode,
sync: registers::SyncConfig,
config1: registers::PacketConfig1,
config2: registers::PacketConfig2,
pa_level: registers::PaLevel,
}
impl Default for RegisterFlags {
fn default() -> Self {
Self {
mode: registers::OpMode::Standby
&!registers::OpMode::Sequencer_Off
&!registers::OpMode::Listen_On,
sync: registers::SyncConfig::On
| registers::SyncConfig::Fifofill_Auto
| registers::SyncConfig::Size_2
| registers::SyncConfig::Tol_0,
config1: registers::PacketConfig1::Format_Variable
| registers::PacketConfig1::Dcfree_Off
| registers::PacketConfig1::Crc_On
| registers::PacketConfig1::Crcautoclear_On
| registers::PacketConfig1::Adrsfiltering_Off,
config2: registers::PacketConfig2::Rxrestartdelay_2bits
&!registers::PacketConfig2::Aes_On
| registers::PacketConfig2::Autorxrestart_On,
pa_level: registers::PaLevel::Pa0_On
&!registers::PaLevel::Pa1_On
&!registers::PaLevel::Pa2_On,
}
}
}
#[allow(dead_code)]
enum AddressFiltering {
None,
AddressOnly(u8),
AddressOrBroadcast((u8,u8)), //(addr, broadcast_addr)
}
#[allow(dead_code)]
#[derive(Debug, PartialEq, Clone, Copy)]
enum RadioMode { //rename transeiver?
Sleep = 0, // Xtal Off
Standby = 1, // Xtal On
FreqSynth = 2, // Pll On
Rx = 3, // Rx Mode
Tx = 4, // Tx Mode
}
impl Default for RadioMode {
fn default() -> Self {
RadioMode::Standby
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum Bitrate {
Lowest,
Low,
Standard,
High,
Custom(u32),
}
impl Default for Bitrate {
fn default() -> Self {
Bitrate::Standard
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum PackageLength {
Fixed(u8), //in bytes
Max(u8),
}
impl Default for PackageLength {
fn default() -> Self {
PackageLength::Fixed(16)
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum FreqencyBand {
ISM315mhz,
ISM433mhz,
ISM868mhz,
ISM915mhz,
}
/// SPI mode
pub const SPI_MODE: Mode = Mode {
phase: Phase::CaptureOnFirstTransition,
polarity: Polarity::IdleLow,
};
pub const SPI_SPEED: u32 = 500_000;
impl<SPI,CS, D, E> Radio<SPI, CS, D>
where SPI: spi::Transfer<u8, Error = E> + spi::Write<u8, Error = E>,
D: DelayMs<u16>+DelayUs<u16>,
CS: OutputPin,
E: core::fmt::Debug {
fn configure_radio(&mut self) -> Result<(),&'static str> {
self.set_default_config();
self.set_package_filtering();
self.set_bitrate();
self.set_frequency()?;
self.set_payload_length();
self.set_power_level();
self.set_encryption_key();
Ok(())
}
pub fn init(&mut self) -> Result<(),&'static str> { | //self.cs.set_high();
//check if the radio responds by seeing if we can change a register
let mut synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0xAA); //170
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0xAA {
synced = true;
break;
}
}
if!synced {return Err("could not communicate with radio")}
synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0x55); //85
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0x55 {
synced = true;
break;
}
}
if!synced {return Err("could not communicate with radio")}
//configure the radio chips for normal use
self.configure_radio()?;
Ok(())
}
// To enable encryption: radio.encrypt("ABCDEFGHIJKLMNOP");
// To disable encryption: radio.encrypt(null) or radio.encrypt(0)
// KEY HAS TO BE 16 bytes!!!
fn set_encryption_key(&mut self) -> Result<(),&'static str> {
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
match self.encryption_key {
None =>
self.register_flags.config2 &=!registers::PacketConfig2::Aes_On, //set aes off
Some(mut key) => {
self.register_flags.config2 |= registers::PacketConfig2::Aes_On; //set aes on
key[0] = Register::Aeskey1.write_address();
self.spi.write(&key).unwrap();
},
}
self.delay.delay_us(15u16);
self.write_reg(Register::Packetconfig2, self.register_flags.config2.bits());
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_power_level(&mut self) {
use crate::registers::PaLevel;
self.register_flags.pa_level -= PaLevel::Power;
self.register_flags.pa_level |= PaLevel::from_bits(self.power_level).unwrap_or(PaLevel::Power);
self.write_reg(Register::Palevel, self.register_flags.pa_level.bits());
}
fn await_interrupt_flag(&mut self, register: Register, flag: registers::IrqFlags2) -> Result<(),&'static str> {
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = registers::IrqFlags2::from_bits(self.read_reg(register)).unwrap();
if interrupt_flag.contains(flag){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("interrupt flag was not set within timeout")
}
pub fn send_blocking(&mut self, adress: u8, buffer: &[u8]) -> Result<(),&'static str> {
use crate::registers::DioMapping1;
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
//setup the interrupt pin so an interrupt wil fire once the packet has been send
self.write_reg(Register::Diomapping1, DioMapping1::Dio0_00.bits()); //in tx mode Dio0_00: packet sent
let return_adress = match self.adress_filtering {
AddressFiltering::None => {
0
},
AddressFiltering::AddressOnly(node_addr) => {
node_addr
},
AddressFiltering::AddressOrBroadcast((node_addr,_broadcast_addr)) => {
node_addr
},
};
//spiXfer(spi_handle, (char*)rawDATA, (char*)rawDATA, bufferSize + 5 );
let mut packet = [0u8; registers::MAX_PACKET_SIZE+3];
let send_len = min(buffer.len() + 3, registers::MAX_PACKET_SIZE);
packet[0] = Register::Fifo.write_address();
packet[1] = send_len as u8;
packet[2] = adress; //1
packet[3] = return_adress; //2
packet[4] = 0;//reserved; //3
packet[5..5+buffer.len()].clone_from_slice(buffer);
//self.cs.set_low();
self.spi.write(&packet[..5+buffer.len()]).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
// no need to wait for transmit mode to be ready since its handled by the radio
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
self.await_interrupt_flag(Register::Irqflags2, registers::IrqFlags2::Packetsent)?;
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_payload_length(&mut self){
match self.package_len {
PackageLength::Fixed(len) => {
self.register_flags.config1 -= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
PackageLength::Max(len) => {
self.register_flags.config1 |= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
}
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
}
fn set_default_config(&mut self) {
for (register, bitflag) in registers::DEFAULT_RADIO_CONFIG.iter() {
self.write_reg(*register, *bitflag);
}
}
fn set_package_filtering(&mut self) {
use registers::SyncConfig;
use registers::PacketConfig1;
match self.network_filtering {
None => {//switch to one sync word (second one is used as network id)
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_1;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
},
Some(network_id) => {
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_2;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
self.write_reg(Register::Syncvalue2, network_id.get());
},
}
self.register_flags.config1 -= PacketConfig1::Adrsfiltering;
match self.adress_filtering {
AddressFiltering::None => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Off;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
},
AddressFiltering::AddressOnly(node_addr) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Node;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
},
AddressFiltering::AddressOrBroadcast((node_addr,broadcast_addr)) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Nodebroadcast;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
self.write_reg(Register::Broadcastadrs, broadcast_addr);
},
}
}
fn set_bitrate(&mut self) {
//bitrate reg value: F_xosc / bitrate (b/s)
match self.bitrate {
Bitrate::Lowest => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_1200.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_1200.bits());
},
Bitrate::Low => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_55555.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_55555.bits());
},
Bitrate::High => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_200kbps.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_200kbps.bits());
},
Bitrate::Standard => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_100000.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_100000.bits());
},
Bitrate::Custom(bitrate) => {
let msb = (FXOSC/bitrate >> 8) as u8;
let lsb = (FXOSC/bitrate) as u8;
self.write_reg(Register::Bitratemsb, msb);
self.write_reg(Register::Bitratelsb, lsb);
},
}
}
fn switch_freq(&mut self) -> Result<(),&'static str> {
let frf = (self.freq as f32 / RF69_FSTEP) as u32; // divide down by FSTEP to get FRF
if self.mode == RadioMode::Tx {
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
} else {
let old_mode = self.mode;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::FreqSynth)?;
self.switch_transeiver_mode_blocking(old_mode)?;
}
Ok(())
}
//see page 38 in the datasheet,
//TODO research Fdev and do that too
fn set_frequency(&mut self) -> Result<(),&'static str> {
if!self.register_flags.mode.contains(registers::OpMode::Sequencer_Off) {
self.register_flags.mode |= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.switch_freq()?;
self.register_flags.mode -= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
} else {
self.switch_freq()?;
}
Ok(())
}
fn switch_transceiver_mode(&mut self, new_mode: RadioMode) {
use registers::OpMode;
let old_flag = self.register_flags.mode - OpMode::Mode;
self.register_flags.mode = match new_mode {
RadioMode::Sleep => old_flag | OpMode::Sleep, // Xtal Off
RadioMode::Standby => old_flag | OpMode::Standby, // Xtal On
RadioMode::FreqSynth => old_flag | OpMode::Synthesizer, // Pll On
RadioMode::Rx => old_flag | OpMode::Receiver, // Rx Mode
RadioMode::Tx => old_flag | OpMode::Transmitter, // Tx Mode
};
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.mode = new_mode;
}
fn switch_transeiver_mode_blocking(&mut self, new_mode: RadioMode) -> Result<(),&'static str>{
use registers::IrqFlags1;
self.switch_transceiver_mode(new_mode);
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = IrqFlags1::from_bits(self.read_reg(Register::Irqflags1)).unwrap();
if interrupt_flag.contains(IrqFlags1::Modeready){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("transiever did not switch within timeout")
}
fn write_reg(&mut self, addr: Register, value: u8) {
let to_write: [u8; 2] = [addr.write_address(), value];
//self.cs.set_low();
self.spi.write(&to_write).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
}
fn read_reg(&mut self, addr: Register) -> u8{
let mut to_transfer: [u8; 2] = [addr.read_address(), 0];
//self.cs.set_low();
let to_transfer = self.spi.transfer(&mut to_transfer).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
let awnser = to_transfer[1];
awnser
}
} | random_line_split |
|
http_service_util.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{bail, Error, ResultExt},
fidl_fuchsia_net_oldhttp::{self as http, HttpServiceProxy},
fuchsia_async as fasync,
fuchsia_syslog::fx_log_info,
fuchsia_zircon as zx,
futures::io::{AllowStdIo, AsyncReadExt},
};
pub fn create_url_request<S: ToString>(url_string: S) -> http::UrlRequest {
http::UrlRequest {
url: url_string.to_string(),
method: String::from("GET"),
headers: None,
body: None,
response_body_buffer_size: 0,
auto_follow_redirects: true,
cache_mode: http::CacheMode::Default,
response_body_mode: http::ResponseBodyMode::Stream,
}
}
// Object to hold results of a single download
#[derive(Default)]
pub struct IndividualDownload {
pub bytes: u64,
pub nanos: u64,
pub goodput_mbps: f64,
}
// TODO (NET-1664): verify checksum on data received
pub async fn fetch_and_discard_url(
http_service: &HttpServiceProxy,
mut url_request: http::UrlRequest,
) -> Result<IndividualDownload, Error> {
// Create a UrlLoader instance
let (s, p) = zx::Channel::create().context("failed to create zx channel")?;
let proxy = fasync::Channel::from_channel(p).context("failed to make async channel")?;
let loader_server = fidl::endpoints::ServerEnd::<http::UrlLoaderMarker>::new(s);
http_service.create_url_loader(loader_server)?;
let loader_proxy = http::UrlLoaderProxy::new(proxy);
let start_time = zx::Time::get(zx::ClockId::Monotonic);
let response = loader_proxy.start(&mut url_request).await?;
if let Some(e) = response.error {
bail!("UrlLoaderProxy error - code:{} ({})", e.code, e.description.unwrap_or("".into()))
}
let socket = match response.body.map(|x| *x) {
Some(http::UrlBody::Stream(s)) => fasync::Socket::from_socket(s)?,
_ => {
bail!("failed to read UrlBody from the stream - error: {}", zx::Status::BAD_STATE);
}
};
// discard the bytes
let mut stdio_sink = AllowStdIo::new(::std::io::sink());
let bytes_received = socket.copy_into(&mut stdio_sink).await?;
let stop_time = zx::Time::get(zx::ClockId::Monotonic);
let time_nanos = (stop_time - start_time).into_nanos() as u64;
let time_seconds = time_nanos as f64 * 1e-9;
let bits_received = (bytes_received * 8) as f64;
fx_log_info!("Received {} bytes in {:.3} seconds", bytes_received, time_seconds);
if bytes_received < 1 {
bail!("Failed to download data from url! bytes_received = {}", bytes_received);
}
let megabits_per_sec = bits_received * 1e-6 / time_seconds;
let mut individual_download = IndividualDownload::default();
individual_download.goodput_mbps = megabits_per_sec;
individual_download.bytes = bytes_received;
individual_download.nanos = time_nanos;
Ok(individual_download)
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints,
//fidl::endpoints::RequestStream,
fidl_fuchsia_net_oldhttp as http,
fidl_fuchsia_net_oldhttp::HttpError,
fidl_fuchsia_net_oldhttp::{HttpServiceMarker, HttpServiceProxy},
fidl_fuchsia_net_oldhttp::{HttpServiceRequest, HttpServiceRequestStream},
fidl_fuchsia_net_oldhttp::{UrlBody, UrlRequest, UrlResponse},
fidl_fuchsia_net_oldhttp::{UrlLoaderRequest, UrlLoaderRequestStream},
fuchsia_async as fasync,
futures::stream::{StreamExt, StreamFuture},
futures::task::Poll,
pin_utils::pin_mut,
};
#[test]
fn verify_basic_url_request_creation() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
assert_eq!(url_req.url, test_url);
assert_eq!(url_req.method, "GET".to_string());
assert!(url_req.headers.is_none());
assert!(url_req.body.is_none());
assert_eq!(url_req.response_body_buffer_size, 0);
assert!(url_req.auto_follow_redirects);
assert_eq!(url_req.cache_mode, http::CacheMode::Default);
assert_eq!(url_req.response_body_mode, http::ResponseBodyMode::Stream);
}
#[test]
fn response_error_triggers_error_path() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
let url_response = create_url_response(None, None, 404);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn successful_download_returns_valid_indvidual_download_data() |
#[test]
fn zero_byte_download_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
assert_eq!(expected_num_bytes, 0);
let url_response = create_url_response(None, url_body, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn null_response_body_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with 0 bytes downloaded
let url_response = create_url_response(None, None, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
fn trigger_download_with_supplied_response(
request: UrlRequest,
mut response: UrlResponse,
) -> Result<IndividualDownload, Error> {
let mut exec = fasync::Executor::new().expect("failed to create an executor");
let (http_service, server) = create_http_service_util();
let mut next_http_service_req = server.into_future();
let url_target = (&request).url.clone();
let fut = fetch_and_discard_url(&http_service, request);
pin_mut!(fut);
assert!(exec.run_until_stalled(&mut fut).is_pending());
let (url_loader_responder, _service_control_handle) =
match poll_http_service_request(&mut exec, &mut next_http_service_req) {
Poll::Ready(HttpServiceRequest::CreateUrlLoader { loader, control_handle }) => {
(loader, control_handle)
}
Poll::Pending => panic!("expected something"),
};
assert!(exec.run_until_stalled(&mut fut).is_pending());
let mut next_url_loader_req = url_loader_responder
.into_stream()
.expect("failed to create a url_loader response stream")
.into_future();
let (url_request, url_request_responder) =
match poll_url_loader_request(&mut exec, &mut next_url_loader_req) {
Poll::Ready(UrlLoaderRequest::Start { request, responder }) => (request, responder),
Poll::Pending => panic!("expected something"),
_ => panic!("got something unexpected!"),
};
assert_eq!(url_target, url_request.url);
url_request_responder.send(&mut response).expect("failed to send UrlResponse");
let complete = exec.run_until_stalled(&mut fut);
match complete {
Poll::Ready(result) => result,
Poll::Pending => panic!("future is pending and not ready"),
}
}
fn create_url_response(
error: Option<Box<HttpError>>,
body: Option<Box<UrlBody>>,
status_code: u32,
) -> http::UrlResponse {
http::UrlResponse {
error: error,
body: body,
url: None,
status_code: status_code,
status_line: None,
headers: None,
mime_type: None,
charset: None,
redirect_method: None,
redirect_url: None,
redirect_referrer: None,
}
}
fn poll_http_service_request(
exec: &mut fasync::Executor,
next_http_service_req: &mut StreamFuture<HttpServiceRequestStream>,
) -> Poll<HttpServiceRequest> {
exec.run_until_stalled(next_http_service_req).map(|(req, stream)| {
*next_http_service_req = stream.into_future();
req.expect("did not expect the HttpServiceRequestStream to end")
.expect("error polling http service request stream")
})
}
fn poll_url_loader_request(
exec: &mut fasync::Executor,
next_url_loader_req: &mut StreamFuture<UrlLoaderRequestStream>,
) -> Poll<UrlLoaderRequest> {
exec.run_until_stalled(next_url_loader_req).map(|(req, stream)| {
*next_url_loader_req = stream.into_future();
req.expect("did not expect the UrlLoaderRequestStream to end")
.expect("error polling url loader request stream")
})
}
fn create_http_service_util() -> (HttpServiceProxy, HttpServiceRequestStream) {
let (proxy, server) = endpoints::create_proxy::<HttpServiceMarker>()
.expect("falied to create a http_service_channel for tests");
let server = server.into_stream().expect("failed to create a http_service response stream");
(proxy, server)
}
}
| {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "there are some bytes".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
let url_response = create_url_response(None, url_body, 200);
let request_result = trigger_download_with_supplied_response(url_req, url_response);
let download_result = request_result.expect("failed to get individual_download");
assert_eq!(download_result.bytes, expected_num_bytes);
} | identifier_body |
http_service_util.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{bail, Error, ResultExt},
fidl_fuchsia_net_oldhttp::{self as http, HttpServiceProxy},
fuchsia_async as fasync,
fuchsia_syslog::fx_log_info,
fuchsia_zircon as zx,
futures::io::{AllowStdIo, AsyncReadExt},
};
pub fn create_url_request<S: ToString>(url_string: S) -> http::UrlRequest {
http::UrlRequest {
url: url_string.to_string(),
method: String::from("GET"),
headers: None,
body: None,
response_body_buffer_size: 0,
auto_follow_redirects: true,
cache_mode: http::CacheMode::Default,
response_body_mode: http::ResponseBodyMode::Stream,
}
}
// Object to hold results of a single download
#[derive(Default)]
pub struct IndividualDownload {
pub bytes: u64,
pub nanos: u64,
pub goodput_mbps: f64,
}
// TODO (NET-1664): verify checksum on data received
pub async fn fetch_and_discard_url(
http_service: &HttpServiceProxy,
mut url_request: http::UrlRequest,
) -> Result<IndividualDownload, Error> {
// Create a UrlLoader instance
let (s, p) = zx::Channel::create().context("failed to create zx channel")?;
let proxy = fasync::Channel::from_channel(p).context("failed to make async channel")?;
let loader_server = fidl::endpoints::ServerEnd::<http::UrlLoaderMarker>::new(s);
http_service.create_url_loader(loader_server)?;
let loader_proxy = http::UrlLoaderProxy::new(proxy);
let start_time = zx::Time::get(zx::ClockId::Monotonic);
let response = loader_proxy.start(&mut url_request).await?;
if let Some(e) = response.error {
bail!("UrlLoaderProxy error - code:{} ({})", e.code, e.description.unwrap_or("".into()))
}
let socket = match response.body.map(|x| *x) {
Some(http::UrlBody::Stream(s)) => fasync::Socket::from_socket(s)?,
_ => {
bail!("failed to read UrlBody from the stream - error: {}", zx::Status::BAD_STATE);
}
};
// discard the bytes
let mut stdio_sink = AllowStdIo::new(::std::io::sink());
let bytes_received = socket.copy_into(&mut stdio_sink).await?;
let stop_time = zx::Time::get(zx::ClockId::Monotonic);
let time_nanos = (stop_time - start_time).into_nanos() as u64;
let time_seconds = time_nanos as f64 * 1e-9;
let bits_received = (bytes_received * 8) as f64;
fx_log_info!("Received {} bytes in {:.3} seconds", bytes_received, time_seconds);
if bytes_received < 1 {
bail!("Failed to download data from url! bytes_received = {}", bytes_received);
}
let megabits_per_sec = bits_received * 1e-6 / time_seconds;
let mut individual_download = IndividualDownload::default();
individual_download.goodput_mbps = megabits_per_sec;
individual_download.bytes = bytes_received;
individual_download.nanos = time_nanos;
Ok(individual_download)
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints,
//fidl::endpoints::RequestStream,
fidl_fuchsia_net_oldhttp as http,
fidl_fuchsia_net_oldhttp::HttpError,
fidl_fuchsia_net_oldhttp::{HttpServiceMarker, HttpServiceProxy},
fidl_fuchsia_net_oldhttp::{HttpServiceRequest, HttpServiceRequestStream},
fidl_fuchsia_net_oldhttp::{UrlBody, UrlRequest, UrlResponse},
fidl_fuchsia_net_oldhttp::{UrlLoaderRequest, UrlLoaderRequestStream},
fuchsia_async as fasync,
futures::stream::{StreamExt, StreamFuture},
futures::task::Poll,
pin_utils::pin_mut,
};
#[test]
fn verify_basic_url_request_creation() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
assert_eq!(url_req.url, test_url);
assert_eq!(url_req.method, "GET".to_string());
assert!(url_req.headers.is_none());
assert!(url_req.body.is_none());
assert_eq!(url_req.response_body_buffer_size, 0);
assert!(url_req.auto_follow_redirects);
assert_eq!(url_req.cache_mode, http::CacheMode::Default);
assert_eq!(url_req.response_body_mode, http::ResponseBodyMode::Stream);
}
#[test]
fn response_error_triggers_error_path() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
let url_response = create_url_response(None, None, 404);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn successful_download_returns_valid_indvidual_download_data() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "there are some bytes".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
let url_response = create_url_response(None, url_body, 200);
let request_result = trigger_download_with_supplied_response(url_req, url_response);
let download_result = request_result.expect("failed to get individual_download");
assert_eq!(download_result.bytes, expected_num_bytes);
}
#[test]
fn zero_byte_download_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
assert_eq!(expected_num_bytes, 0);
let url_response = create_url_response(None, url_body, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn null_response_body_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with 0 bytes downloaded
let url_response = create_url_response(None, None, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
fn | (
request: UrlRequest,
mut response: UrlResponse,
) -> Result<IndividualDownload, Error> {
let mut exec = fasync::Executor::new().expect("failed to create an executor");
let (http_service, server) = create_http_service_util();
let mut next_http_service_req = server.into_future();
let url_target = (&request).url.clone();
let fut = fetch_and_discard_url(&http_service, request);
pin_mut!(fut);
assert!(exec.run_until_stalled(&mut fut).is_pending());
let (url_loader_responder, _service_control_handle) =
match poll_http_service_request(&mut exec, &mut next_http_service_req) {
Poll::Ready(HttpServiceRequest::CreateUrlLoader { loader, control_handle }) => {
(loader, control_handle)
}
Poll::Pending => panic!("expected something"),
};
assert!(exec.run_until_stalled(&mut fut).is_pending());
let mut next_url_loader_req = url_loader_responder
.into_stream()
.expect("failed to create a url_loader response stream")
.into_future();
let (url_request, url_request_responder) =
match poll_url_loader_request(&mut exec, &mut next_url_loader_req) {
Poll::Ready(UrlLoaderRequest::Start { request, responder }) => (request, responder),
Poll::Pending => panic!("expected something"),
_ => panic!("got something unexpected!"),
};
assert_eq!(url_target, url_request.url);
url_request_responder.send(&mut response).expect("failed to send UrlResponse");
let complete = exec.run_until_stalled(&mut fut);
match complete {
Poll::Ready(result) => result,
Poll::Pending => panic!("future is pending and not ready"),
}
}
fn create_url_response(
error: Option<Box<HttpError>>,
body: Option<Box<UrlBody>>,
status_code: u32,
) -> http::UrlResponse {
http::UrlResponse {
error: error,
body: body,
url: None,
status_code: status_code,
status_line: None,
headers: None,
mime_type: None,
charset: None,
redirect_method: None,
redirect_url: None,
redirect_referrer: None,
}
}
fn poll_http_service_request(
exec: &mut fasync::Executor,
next_http_service_req: &mut StreamFuture<HttpServiceRequestStream>,
) -> Poll<HttpServiceRequest> {
exec.run_until_stalled(next_http_service_req).map(|(req, stream)| {
*next_http_service_req = stream.into_future();
req.expect("did not expect the HttpServiceRequestStream to end")
.expect("error polling http service request stream")
})
}
fn poll_url_loader_request(
exec: &mut fasync::Executor,
next_url_loader_req: &mut StreamFuture<UrlLoaderRequestStream>,
) -> Poll<UrlLoaderRequest> {
exec.run_until_stalled(next_url_loader_req).map(|(req, stream)| {
*next_url_loader_req = stream.into_future();
req.expect("did not expect the UrlLoaderRequestStream to end")
.expect("error polling url loader request stream")
})
}
fn create_http_service_util() -> (HttpServiceProxy, HttpServiceRequestStream) {
let (proxy, server) = endpoints::create_proxy::<HttpServiceMarker>()
.expect("falied to create a http_service_channel for tests");
let server = server.into_stream().expect("failed to create a http_service response stream");
(proxy, server)
}
}
| trigger_download_with_supplied_response | identifier_name |
http_service_util.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{bail, Error, ResultExt},
fidl_fuchsia_net_oldhttp::{self as http, HttpServiceProxy},
fuchsia_async as fasync,
fuchsia_syslog::fx_log_info,
fuchsia_zircon as zx,
futures::io::{AllowStdIo, AsyncReadExt},
};
pub fn create_url_request<S: ToString>(url_string: S) -> http::UrlRequest {
http::UrlRequest {
url: url_string.to_string(),
method: String::from("GET"),
headers: None,
body: None,
response_body_buffer_size: 0, |
// Object to hold results of a single download
#[derive(Default)]
pub struct IndividualDownload {
pub bytes: u64,
pub nanos: u64,
pub goodput_mbps: f64,
}
// TODO (NET-1664): verify checksum on data received
pub async fn fetch_and_discard_url(
http_service: &HttpServiceProxy,
mut url_request: http::UrlRequest,
) -> Result<IndividualDownload, Error> {
// Create a UrlLoader instance
let (s, p) = zx::Channel::create().context("failed to create zx channel")?;
let proxy = fasync::Channel::from_channel(p).context("failed to make async channel")?;
let loader_server = fidl::endpoints::ServerEnd::<http::UrlLoaderMarker>::new(s);
http_service.create_url_loader(loader_server)?;
let loader_proxy = http::UrlLoaderProxy::new(proxy);
let start_time = zx::Time::get(zx::ClockId::Monotonic);
let response = loader_proxy.start(&mut url_request).await?;
if let Some(e) = response.error {
bail!("UrlLoaderProxy error - code:{} ({})", e.code, e.description.unwrap_or("".into()))
}
let socket = match response.body.map(|x| *x) {
Some(http::UrlBody::Stream(s)) => fasync::Socket::from_socket(s)?,
_ => {
bail!("failed to read UrlBody from the stream - error: {}", zx::Status::BAD_STATE);
}
};
// discard the bytes
let mut stdio_sink = AllowStdIo::new(::std::io::sink());
let bytes_received = socket.copy_into(&mut stdio_sink).await?;
let stop_time = zx::Time::get(zx::ClockId::Monotonic);
let time_nanos = (stop_time - start_time).into_nanos() as u64;
let time_seconds = time_nanos as f64 * 1e-9;
let bits_received = (bytes_received * 8) as f64;
fx_log_info!("Received {} bytes in {:.3} seconds", bytes_received, time_seconds);
if bytes_received < 1 {
bail!("Failed to download data from url! bytes_received = {}", bytes_received);
}
let megabits_per_sec = bits_received * 1e-6 / time_seconds;
let mut individual_download = IndividualDownload::default();
individual_download.goodput_mbps = megabits_per_sec;
individual_download.bytes = bytes_received;
individual_download.nanos = time_nanos;
Ok(individual_download)
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints,
//fidl::endpoints::RequestStream,
fidl_fuchsia_net_oldhttp as http,
fidl_fuchsia_net_oldhttp::HttpError,
fidl_fuchsia_net_oldhttp::{HttpServiceMarker, HttpServiceProxy},
fidl_fuchsia_net_oldhttp::{HttpServiceRequest, HttpServiceRequestStream},
fidl_fuchsia_net_oldhttp::{UrlBody, UrlRequest, UrlResponse},
fidl_fuchsia_net_oldhttp::{UrlLoaderRequest, UrlLoaderRequestStream},
fuchsia_async as fasync,
futures::stream::{StreamExt, StreamFuture},
futures::task::Poll,
pin_utils::pin_mut,
};
#[test]
fn verify_basic_url_request_creation() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
assert_eq!(url_req.url, test_url);
assert_eq!(url_req.method, "GET".to_string());
assert!(url_req.headers.is_none());
assert!(url_req.body.is_none());
assert_eq!(url_req.response_body_buffer_size, 0);
assert!(url_req.auto_follow_redirects);
assert_eq!(url_req.cache_mode, http::CacheMode::Default);
assert_eq!(url_req.response_body_mode, http::ResponseBodyMode::Stream);
}
#[test]
fn response_error_triggers_error_path() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
let url_response = create_url_response(None, None, 404);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn successful_download_returns_valid_indvidual_download_data() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "there are some bytes".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
let url_response = create_url_response(None, url_body, 200);
let request_result = trigger_download_with_supplied_response(url_req, url_response);
let download_result = request_result.expect("failed to get individual_download");
assert_eq!(download_result.bytes, expected_num_bytes);
}
#[test]
fn zero_byte_download_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
assert_eq!(expected_num_bytes, 0);
let url_response = create_url_response(None, url_body, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn null_response_body_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with 0 bytes downloaded
let url_response = create_url_response(None, None, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
fn trigger_download_with_supplied_response(
request: UrlRequest,
mut response: UrlResponse,
) -> Result<IndividualDownload, Error> {
let mut exec = fasync::Executor::new().expect("failed to create an executor");
let (http_service, server) = create_http_service_util();
let mut next_http_service_req = server.into_future();
let url_target = (&request).url.clone();
let fut = fetch_and_discard_url(&http_service, request);
pin_mut!(fut);
assert!(exec.run_until_stalled(&mut fut).is_pending());
let (url_loader_responder, _service_control_handle) =
match poll_http_service_request(&mut exec, &mut next_http_service_req) {
Poll::Ready(HttpServiceRequest::CreateUrlLoader { loader, control_handle }) => {
(loader, control_handle)
}
Poll::Pending => panic!("expected something"),
};
assert!(exec.run_until_stalled(&mut fut).is_pending());
let mut next_url_loader_req = url_loader_responder
.into_stream()
.expect("failed to create a url_loader response stream")
.into_future();
let (url_request, url_request_responder) =
match poll_url_loader_request(&mut exec, &mut next_url_loader_req) {
Poll::Ready(UrlLoaderRequest::Start { request, responder }) => (request, responder),
Poll::Pending => panic!("expected something"),
_ => panic!("got something unexpected!"),
};
assert_eq!(url_target, url_request.url);
url_request_responder.send(&mut response).expect("failed to send UrlResponse");
let complete = exec.run_until_stalled(&mut fut);
match complete {
Poll::Ready(result) => result,
Poll::Pending => panic!("future is pending and not ready"),
}
}
fn create_url_response(
error: Option<Box<HttpError>>,
body: Option<Box<UrlBody>>,
status_code: u32,
) -> http::UrlResponse {
http::UrlResponse {
error: error,
body: body,
url: None,
status_code: status_code,
status_line: None,
headers: None,
mime_type: None,
charset: None,
redirect_method: None,
redirect_url: None,
redirect_referrer: None,
}
}
fn poll_http_service_request(
exec: &mut fasync::Executor,
next_http_service_req: &mut StreamFuture<HttpServiceRequestStream>,
) -> Poll<HttpServiceRequest> {
exec.run_until_stalled(next_http_service_req).map(|(req, stream)| {
*next_http_service_req = stream.into_future();
req.expect("did not expect the HttpServiceRequestStream to end")
.expect("error polling http service request stream")
})
}
fn poll_url_loader_request(
exec: &mut fasync::Executor,
next_url_loader_req: &mut StreamFuture<UrlLoaderRequestStream>,
) -> Poll<UrlLoaderRequest> {
exec.run_until_stalled(next_url_loader_req).map(|(req, stream)| {
*next_url_loader_req = stream.into_future();
req.expect("did not expect the UrlLoaderRequestStream to end")
.expect("error polling url loader request stream")
})
}
fn create_http_service_util() -> (HttpServiceProxy, HttpServiceRequestStream) {
let (proxy, server) = endpoints::create_proxy::<HttpServiceMarker>()
.expect("falied to create a http_service_channel for tests");
let server = server.into_stream().expect("failed to create a http_service response stream");
(proxy, server)
}
} | auto_follow_redirects: true,
cache_mode: http::CacheMode::Default,
response_body_mode: http::ResponseBodyMode::Stream,
}
} | random_line_split |
sumtree.rs | Actual data
Leaf(T::Sum),
/// Node with 2^n children
Internal {
lchild: Box<NodeData<T>>,
rchild: Box<NodeData<T>>,
sum: T::Sum,
},
}
impl<T: Summable> Summable for Node<T> {
type Sum = T::Sum;
fn sum(&self) -> T::Sum {
match *self {
Node::Pruned(ref sum) => sum.clone(),
Node::Leaf(ref sum) => sum.clone(),
Node::Internal { ref sum,.. } => sum.clone(),
}
}
}
#[derive(Clone)]
struct NodeData<T: Summable> {
full: bool,
node: Node<T>,
hash: Hash,
depth: u8,
}
impl<T: Summable> Summable for NodeData<T> {
type Sum = T::Sum;
fn sum(&self) -> T::Sum {
self.node.sum()
}
}
impl<T: Summable> NodeData<T> {
/// Get the root hash and sum of the node
fn root_sum(&self) -> (Hash, T::Sum) {
(self.hash, self.sum())
}
fn n_leaves(&self) -> usize {
if self.full {
1 << self.depth
} else {
if let Node::Internal {
ref lchild,
ref rchild,
..
} = self.node
{
lchild.n_leaves() + rchild.n_leaves()
} else {
unreachable!()
}
}
}
}
/// An insertion ordered merkle sum tree.
#[derive(Clone)]
pub struct SumTree<T: Summable + Writeable> {
/// Index mapping data to its index in the tree
index: HashMap<Hash, usize>,
/// Tree contents
root: Option<NodeData<T>>,
}
impl<T> SumTree<T>
where
T: Summable + Writeable,
{
/// Create a new empty tree
pub fn new() -> SumTree<T> {
SumTree {
index: HashMap::new(),
root: None,
}
}
/// Accessor for the tree's root
pub fn root_sum(&self) -> Option<(Hash, T::Sum)> {
self.root.as_ref().map(|node| node.root_sum())
}
fn insert_right_of(mut old: NodeData<T>, new: NodeData<T>) -> NodeData<T> {
assert!(old.depth >= new.depth);
// If we are inserting next to a full node, make a parent. If we're
// inserting a tree of equal depth then we get a full node, otherwise
// we get a partial node. Leaves and pruned data both count as full
// nodes.
if old.full {
let parent_depth = old.depth + 1;
let parent_sum = old.sum() + new.sum();
let parent_hash = (parent_depth, &parent_sum, old.hash, new.hash).hash();
let parent_full = old.depth == new.depth;
let parent_node = Node::Internal {
lchild: Box::new(old),
rchild: Box::new(new),
sum: parent_sum,
};
NodeData {
full: parent_full,
node: parent_node,
hash: parent_hash,
depth: parent_depth,
}
// If we are inserting next to a partial node, we should actually be
// inserting under the node, so we recurse. The right child of a partial
// node is always another partial node or a leaf.
} else {
if let Node::Internal {
ref lchild,
ref mut rchild,
ref mut sum,
} = old.node
{
// Recurse
let dummy_child = NodeData {
full: true,
node: Node::Pruned(sum.clone()),
hash: old.hash,
depth: 0,
};
let moved_rchild = mem::replace(&mut **rchild, dummy_child);
mem::replace(&mut **rchild, SumTree::insert_right_of(moved_rchild, new));
// Update this node's states to reflect the new right child
if rchild.full && rchild.depth == old.depth - 1 {
old.full = rchild.full;
}
*sum = lchild.sum() + rchild.sum();
old.hash = (old.depth, &*sum, lchild.hash, rchild.hash).hash();
} else {
unreachable!()
}
old
}
}
/// Accessor for number of elements (leaves) in the tree, not including
/// pruned ones.
pub fn len(&self) -> usize {
self.index.len()
}
/// Accessor for number of elements (leaves) in the tree, including pruned
/// ones.
pub fn unpruned_len(&self) -> usize {
match self.root {
None => 0,
Some(ref node) => node.n_leaves(),
}
}
/// Add an element to the tree. Returns true if the element was added,
/// false if it already existed in the tree.
pub fn push(&mut self, elem: T) -> bool {
// Compute element hash and depth-0 node hash
let index_hash = Hashed::hash(&elem);
let elem_sum = elem.sum();
let elem_hash = (0u8, &elem_sum, index_hash).hash();
if self.index.contains_key(&index_hash) {
return false;
}
// Special-case the first element
if self.root.is_none() {
self.root = Some(NodeData {
full: true,
node: Node::Leaf(elem_sum),
hash: elem_hash,
depth: 0,
});
self.index.insert(index_hash, 0);
return true;
}
// Next, move the old root out of the structure so that we are allowed to
// move it. We will move a new root back in at the end of the function
let old_root = mem::replace(&mut self.root, None).unwrap();
// Insert into tree, compute new root
let new_node = NodeData {
full: true,
node: Node::Leaf(elem_sum),
hash: elem_hash,
depth: 0,
};
// Put new root in place and record insertion
let index = old_root.n_leaves();
self.root = Some(SumTree::insert_right_of(old_root, new_node));
self.index.insert(index_hash, index);
true
}
fn replace_recurse(node: &mut NodeData<T>, index: usize, new_elem: T) {
assert!(index < (1 << node.depth));
if node.depth == 0 {
assert!(node.full);
node.hash = (0u8, new_elem.sum(), Hashed::hash(&new_elem)).hash();
node.node = Node::Leaf(new_elem.sum());
} else {
match node.node {
Node::Internal {
ref mut lchild,
ref mut rchild,
ref mut sum,
} => {
let bit = index & (1 << (node.depth - 1));
if bit > 0 {
SumTree::replace_recurse(rchild, index - bit, new_elem);
} else {
SumTree::replace_recurse(lchild, index, new_elem);
}
*sum = lchild.sum() + rchild.sum();
node.hash = (node.depth, &*sum, lchild.hash, rchild.hash).hash();
}
// Pruned data would not have been in the index
Node::Pruned(_) => unreachable!(),
Node::Leaf(_) => unreachable!(),
}
}
}
/// Replaces an element in the tree. Returns true if the element existed
/// and was replaced. Returns false if the old element did not exist or
/// if the new element already existed
pub fn replace(&mut self, elem: &T, new_elem: T) -> bool {
let index_hash = Hashed::hash(elem);
let root = match self.root {
Some(ref mut node) => node,
None => {
return false;
}
};
match self.index.remove(&index_hash) {
None => false,
Some(index) => {
let new_index_hash = Hashed::hash(&new_elem);
if self.index.contains_key(&new_index_hash) {
false
} else {
SumTree::replace_recurse(root, index, new_elem);
self.index.insert(new_index_hash, index);
true
}
}
}
}
/// Determine whether an element exists in the tree.
/// If so, return its index
pub fn contains(&self, elem: &T) -> Option<usize> {
let index_hash = Hashed::hash(elem);
self.index.get(&index_hash).map(|x| *x)
}
fn prune_recurse(node: &mut NodeData<T>, index: usize) {
assert!(index < (1 << node.depth));
if node.depth == 0 {
let sum = if let Node::Leaf(ref sum) = node.node {
sum.clone()
} else {
unreachable!()
};
node.node = Node::Pruned(sum);
} else {
let mut prune_me = None;
match node.node {
Node::Internal {
ref mut lchild,
ref mut rchild,
..
} => {
let bit = index & (1 << (node.depth - 1));
if bit > 0 {
SumTree::prune_recurse(rchild, index - bit);
} else {
SumTree::prune_recurse(lchild, index);
}
if let (&Node::Pruned(ref lsum), &Node::Pruned(ref rsum)) =
(&lchild.node, &rchild.node)
{
if node.full {
prune_me = Some(lsum.clone() + rsum.clone());
}
}
}
Node::Pruned(_) => {
// Already pruned. Ok.
}
Node::Leaf(_) => unreachable!(),
}
if let Some(sum) = prune_me {
node.node = Node::Pruned(sum);
}
}
}
/// Removes an element from storage, not affecting the tree
/// Returns true if the element was actually in the tree
pub fn prune(&mut self, elem: &T) -> bool {
let index_hash = Hashed::hash(elem);
let root = match self.root {
Some(ref mut node) => node,
None => {
return false;
}
};
match self.index.remove(&index_hash) {
None => false,
Some(index) => {
SumTree::prune_recurse(root, index);
true
}
}
}
fn | (node: &NodeData<T>) -> NodeData<T> {
if node.full {
// replaces full internal nodes, leaves and already pruned nodes are full
// as well
NodeData {
full: true,
node: Node::Pruned(node.sum()),
hash: node.hash,
depth: node.depth,
}
} else {
if let Node::Internal { ref lchild, ref rchild, ref sum } = node.node {
// just recurse on each side to get the pruned version
NodeData {
full: false,
node: Node::Internal {
lchild: Box::new(SumTree::clone_pruned_recurse(lchild)),
rchild: Box::new(SumTree::clone_pruned_recurse(rchild)),
sum: sum.clone(),
},
hash: node.hash,
depth: node.depth,
}
} else {
unreachable!()
}
}
}
/// Minimal clone of this tree, replacing all full nodes with a pruned node,
/// therefore only copying non-full subtrees.
pub fn clone_pruned(&self) -> SumTree<T> {
match self.root {
Some(ref node) => {
SumTree {
index: HashMap::new(),
root: Some(SumTree::clone_pruned_recurse(node)),
}
},
None => SumTree::new(),
}
}
// TODO push_many, truncate to allow bulk updates
}
// A SumTree is encoded as follows: an empty tree is the single byte 0x00.
// An nonempty tree is encoded recursively by encoding its root node. Each
// node is encoded as follows:
// flag: two bits, 01 for partial, 10 for full, 11 for pruned
// 00 is reserved so that the 0 byte can uniquely specify an empty tree
// depth: six bits, zero indicates a leaf
// hash: 32 bytes
// sum: <length of sum encoding>
//
// For a leaf, this is followed by an encoding of the element. For an
// internal node, the left child is encoded followed by the right child.
// For a pruned internal node, it is followed by nothing.
//
impl<T> Writeable for SumTree<T>
where
T: Summable + Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
match self.root {
None => writer.write_u8(0),
Some(ref node) => node.write(writer),
}
}
}
impl<T> Writeable for NodeData<T>
where
T: Summable + Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
assert!(self.depth < 64);
// Compute depth byte: 0x80 means full, 0xc0 means unpruned
let mut depth = 0;
if self.full {
depth |= 0x80;
}
if let Node::Pruned(_) = self.node {
} else {
depth |= 0xc0;
}
depth |= self.depth;
// Encode node
try!(writer.write_u8(depth));
try!(self.hash.write(writer));
match self.node {
Node::Pruned(ref sum) => sum.write(writer),
Node::Leaf(ref sum) => sum.write(writer),
Node::Internal {
ref lchild,
ref rchild,
ref sum,
} => {
try!(sum.write(writer));
try!(lchild.write(writer));
rchild.write(writer)
}
}
}
}
fn node_read_recurse<T>(
reader: &mut Reader,
index: &mut HashMap<Hash, usize>,
tree_index: &mut usize,
) -> Result<NodeData<T>, ser::Error>
where
T: Summable + Readable + Hashed,
{
// Read depth byte
let depth = try!(reader.read_u8());
let full = depth & 0x80 == 0x80;
let pruned = depth & 0xc0!= 0xc0;
let depth = depth & 0x3f;
// Sanity-check for zero byte
if pruned &&!full {
return Err(ser::Error::CorruptedData);
}
// Read remainder of node
let hash = try!(Readable::read(reader));
let sum = try!(Readable::read(reader));
let data = match (depth, pruned) {
(_, true) => {
*tree_index += 1 << depth as usize;
Node::Pruned(sum)
}
(0, _) => {
index.insert(hash, *tree_index);
*tree_index += 1;
Node::Leaf(sum)
}
(_, _) => {
Node::Internal {
lchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
rchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
sum: sum,
}
}
};
Ok(NodeData {
full: full,
node: data,
hash: hash,
depth: depth,
})
}
impl<T> Readable for SumTree<T>
where
T: Summable + Writeable + Readable + Hashed,
{
fn read(reader: &mut Reader) -> Result<SumTree<T>, ser::Error> {
// Read depth byte of root node
let depth = try!(reader.read_u8());
let full = depth & 0x80 == 0x80;
let pruned = depth & 0xc0!= 0xc0;
let depth = depth & 0x3f;
// Special-case the zero byte
if pruned &&!full {
return Ok(SumTree {
index: HashMap::new(),
root: None,
});
}
// Otherwise continue reading it
let mut index = HashMap::new();
let hash = try!(Readable::read(reader));
let sum = try!(Readable::read(reader));
let data = match (depth, pruned) {
(_, true) => Node::Pruned(sum),
(0, _) => Node::Leaf(sum),
(_, _) => {
let mut tree_index = 0;
Node::Internal {
lchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
rchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
sum: sum,
}
}
};
Ok(SumTree {
index: index,
root: Some(NodeData {
full: full,
node: data,
hash: hash,
depth: depth,
}),
})
}
}
/// This is used to as a scratch space during root calculation so that we can
/// keep everything on the stack in a fixed-size array. It reflects a maximum
/// tree capacity of 2^48, which is not practically reachable.
const MAX_MMR_HEIGHT: usize = 48;
/// This algorithm is based on Peter Todd's in
/// https://github.com/opentimestamps/opentimestamps-server/blob/master/python-opentimestamps/opentimestamps/core/timestamp.py#L324
///
fn compute_peaks<S, I>(iter: I, peaks: &mut [Option<(u8, Hash, S)>])
where
S: Clone + ops::Add<Output = S> + Writeable,
I: Iterator<Item = (u8, Hash, S)>,
{
for peak in peaks.iter_mut() {
*peak = None;
}
for (mut new_depth, mut new_hash, mut new_sum) in iter {
let mut index = 0;
while let Some((old_depth, old_hash, old_sum)) = peaks[index].take() {
// Erase current peak (done by `take()` above), then combine
// it with the new addition, to be inserted one higher
index += 1;
new_depth = old_depth + 1;
new_sum = old_sum.clone() + new_sum.clone();
new_hash = (new_depth, &new_sum, old_hash, new_hash).hash();
}
peaks[index] = Some((new_depth, new_hash, new_sum));
}
}
/// Directly compute the Merkle root of a sum-tree whose contents are given
/// explicitly in the passed iterator.
pub fn compute_root<'a, T, I>(iter: I) -> Option<(Hash, T::Sum)>
where
T: 'a + Summable + Writeable,
I: Iterator<Item = &'a T>,
{
let mut peaks = vec![None; MAX_MMR_HEIGHT];
compute_peaks(
iter.map(|elem| {
let depth = 0u8;
let sum = elem.sum();
let hash = (depth, &sum, Hashed::hash(elem)).hash();
(depth, hash, sum)
}),
&mut peaks,
);
let mut ret = None;
for peak in peaks {
ret = match (peak, ret) {
(None, x) => x,
(Some((_, hash, sum)), None) => Some((hash, sum)),
(Some((depth, lhash, lsum)), Some((rhash, rsum))) => {
let sum = lsum + rsum;
let hash = (depth + 1, &sum, lhash, rhash).hash();
Some((hash, sum))
}
};
}
ret
}
// a couple functions that help debugging
#[allow(dead_code)]
fn print_node<T>(node: &NodeData<T>, tab_level: usize)
where
T: Summable + Writeable,
T::Sum: std::fmt::Debug,
{
for _ in 0..tab_level {
print!(" ");
}
print!("[{:03}] {} {:?}", node.depth, node.hash, node.sum());
match node.node {
Node::Pruned(_) => println!(" X"),
Node::Leaf(_) => println!(" L"),
Node::Internal {
ref lchild,
ref rchild,
..
} => {
println!(":");
print_node(lchild, tab_level + 1);
print_node(rchild, tab_level + 1);
}
}
}
#[allow(dead_code)]
#[allow(missing_docs)]
pub fn print_tree<T>(tree: &SumTree<T>)
where
T: Summable + Writeable,
T::Sum: std::fmt::Debug,
{
match tree.root {
None => println!("[empty tree]"),
Some(ref node) => {
print_node(node, 0);
}
}
}
#[cfg(test)]
mod test {
use rand::{thread_rng, Rng};
use core::hash::Hashed;
use ser;
use super::*;
#[derive(Copy, Clone, Debug)]
struct TestElem([u32; 4]);
impl Summable for TestElem {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
}
}
impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
try!(writer.write_u32(self.0[0]));
try!(writer.write_u32(self.0[1]));
try!(writer.write_u32(self.0[2]));
writer.write_u32(self.0[3])
}
}
fn sumtree_create_(prune: bool) {
let mut tree = SumTree::new();
macro_rules! leaf {
($data: expr) => ({
(0u8, $data.sum(), $data.hash())
})
};
macro_rules! node {
($left: expr, $right: expr) => (
($left.0 + 1, $left.1 + $right.1, $left.hash(), $right.hash())
)
};
macro_rules! prune {
($prune: expr, $tree: expr, $elem: expr) => {
if $prune {
assert_eq!($tree.len(), 1);
$tree.prune(&$elem);
assert_eq!($tree.len(), 0);
// double-pruning shouldn't hurt anything
$tree.prune(&$elem);
assert_eq!($tree.len(), 0);
} else {
assert_eq!($tree.len(), $tree.unpruned_len());
}
}
};
let mut elems = [
TestElem([0, 0, 0, 1]),
TestElem([0, 0, 0, 2]),
TestElem([0, 0, 0, 3]),
TestElem([0, 0, 0, 4]),
TestElem([0, 0, 0, 5]),
TestElem([0, 0, 0, 6]),
TestElem([0, 0, 0, 7]),
TestElem([1, 0, 0, 0]),
];
assert_eq!(tree.root_sum(), None);
assert_eq!(tree.root_sum(), compute_root(elems[0..0].iter()));
assert_eq!(tree.len(), 0);
assert_eq!(tree.contains(&elems[0]), None);
assert!(tree.push(elems[0]));
assert_eq!(tree.contains(&elems[0]), Some(0));
// One element
let expected = leaf!(elems[0]).hash();
assert_eq!(tree.root_sum(), Some((expected, 1)));
assert_eq!(tree.root_sum(), compute_root(elems[0..1].iter()));
assert_eq!(tree.unpruned_len(), 1);
prune!(prune, tree, elems[0]);
// Two elements
assert_eq!(tree.contains(&elems[1]), None);
assert!(tree.push(elems[1]));
assert_eq!(tree.contains(&elems[1]), Some(1));
let expected = node!(leaf!(elems[0]), leaf!(elems[1])).hash();
assert_eq!(tree.root_sum(), Some((expected, 3)));
assert_eq!(tree.root_sum(), compute_root(elems[0..2].iter()));
assert_eq!(tree.unpruned_len(), 2);
prune!(prune, tree, elems[1]);
// Three elements
assert_eq!(tree.contains(&elems[2]), None);
assert!(tree.push(elems[2]));
assert_eq!(tree.contains(&elems[2]), Some(2));
let expected = node!(node!(leaf!(elems[0]), leaf!(elems[1])), leaf!(elems[2])).hash();
assert_eq!(tree.root_sum(), Some((expected, 6)));
assert_eq!(tree.root_sum(), compute_root(elems[0..3].iter()));
assert_eq!(tree.unpruned_len(), 3);
prune!(prune, tree, elems[2]);
// Four elements
assert_eq!(tree.contains(&elems[3]), None);
assert!(tree.push(elems[3]));
assert_eq!(tree.contains(&elems[3]), Some(3));
let expected = node!(
node!(leaf!(elems[0]), leaf!(elems[1])),
node!(leaf!(elems[2]), leaf!(elems[3]))
).hash();
assert_eq!(tree.root_sum(), Some((expected, 10)));
assert_eq!(tree.root_sum(), compute_root(elems[0..4].iter()));
assert_eq!(tree.unpruned_len(), 4);
prune!(prune, tree, elems[3]);
// Five elements
assert_eq!(tree.contains(&elems[4]), None);
assert!(tree.push(elems[4]));
assert_eq!(tree.contains(&elems[4]), Some(4));
let expected = node!(
node!(
node!(leaf!(elems[0]), leaf!(elems[1])),
node!(leaf!(elems[2]), leaf!(elems[3]))
),
leaf!(elems[4])
).hash();
assert_eq!(tree.root_sum(), Some((expected, 15)));
assert_eq!(tree.root_sum(), compute_root(elems[0..5].iter()));
assert_eq!(tree.unpruned_len(), 5);
prune!(prune, tree, elems[4]);
// Six elements
assert_eq!(tree.contains(&elems[5]), None);
assert!(tree.push(elems[5]));
assert_eq!(tree.contains(&elems[5]), Some(5));
let expected = node!(
node!(
node!(leaf!(elems[0]), leaf!(elems[1])),
node!(leaf!(elems[2]), leaf!(elems[3]))
),
node!(leaf!(elems[4]), leaf!(elems[5]))
).hash();
assert_eq!(tree.root_sum(), Some((expected, 21)));
assert_eq!(tree.root_sum(), compute_ | clone_pruned_recurse | identifier_name |
sumtree.rs | /// Actual data
Leaf(T::Sum),
/// Node with 2^n children
Internal {
lchild: Box<NodeData<T>>,
rchild: Box<NodeData<T>>,
sum: T::Sum,
},
}
impl<T: Summable> Summable for Node<T> {
type Sum = T::Sum;
fn sum(&self) -> T::Sum {
match *self {
Node::Pruned(ref sum) => sum.clone(),
Node::Leaf(ref sum) => sum.clone(),
Node::Internal { ref sum,.. } => sum.clone(),
}
}
}
#[derive(Clone)]
struct NodeData<T: Summable> {
full: bool,
node: Node<T>,
hash: Hash,
depth: u8,
}
impl<T: Summable> Summable for NodeData<T> {
type Sum = T::Sum;
fn sum(&self) -> T::Sum {
self.node.sum()
}
}
impl<T: Summable> NodeData<T> {
/// Get the root hash and sum of the node
fn root_sum(&self) -> (Hash, T::Sum) {
(self.hash, self.sum())
}
fn n_leaves(&self) -> usize {
if self.full {
1 << self.depth
} else {
if let Node::Internal {
ref lchild,
ref rchild,
..
} = self.node
{
lchild.n_leaves() + rchild.n_leaves()
} else {
unreachable!()
}
}
}
}
/// An insertion ordered merkle sum tree.
#[derive(Clone)]
pub struct SumTree<T: Summable + Writeable> {
/// Index mapping data to its index in the tree
index: HashMap<Hash, usize>,
/// Tree contents
root: Option<NodeData<T>>,
}
impl<T> SumTree<T>
where
T: Summable + Writeable,
{
/// Create a new empty tree
pub fn new() -> SumTree<T> {
SumTree {
index: HashMap::new(),
root: None,
}
}
/// Accessor for the tree's root
pub fn root_sum(&self) -> Option<(Hash, T::Sum)> {
self.root.as_ref().map(|node| node.root_sum())
}
fn insert_right_of(mut old: NodeData<T>, new: NodeData<T>) -> NodeData<T> {
assert!(old.depth >= new.depth);
// If we are inserting next to a full node, make a parent. If we're
// inserting a tree of equal depth then we get a full node, otherwise
// we get a partial node. Leaves and pruned data both count as full
// nodes.
if old.full {
let parent_depth = old.depth + 1;
let parent_sum = old.sum() + new.sum();
let parent_hash = (parent_depth, &parent_sum, old.hash, new.hash).hash();
let parent_full = old.depth == new.depth;
let parent_node = Node::Internal {
lchild: Box::new(old),
rchild: Box::new(new),
sum: parent_sum,
};
NodeData {
full: parent_full,
node: parent_node,
hash: parent_hash,
depth: parent_depth,
}
// If we are inserting next to a partial node, we should actually be
// inserting under the node, so we recurse. The right child of a partial
// node is always another partial node or a leaf.
} else {
if let Node::Internal {
ref lchild,
ref mut rchild,
ref mut sum,
} = old.node
{
// Recurse
let dummy_child = NodeData {
full: true,
node: Node::Pruned(sum.clone()),
hash: old.hash,
depth: 0,
};
let moved_rchild = mem::replace(&mut **rchild, dummy_child);
mem::replace(&mut **rchild, SumTree::insert_right_of(moved_rchild, new));
// Update this node's states to reflect the new right child
if rchild.full && rchild.depth == old.depth - 1 {
old.full = rchild.full;
}
*sum = lchild.sum() + rchild.sum();
old.hash = (old.depth, &*sum, lchild.hash, rchild.hash).hash();
} else {
unreachable!()
}
old
}
}
/// Accessor for number of elements (leaves) in the tree, not including
/// pruned ones.
pub fn len(&self) -> usize {
self.index.len()
}
/// Accessor for number of elements (leaves) in the tree, including pruned
/// ones.
pub fn unpruned_len(&self) -> usize {
match self.root {
None => 0,
Some(ref node) => node.n_leaves(),
}
}
/// Add an element to the tree. Returns true if the element was added,
/// false if it already existed in the tree.
pub fn push(&mut self, elem: T) -> bool {
// Compute element hash and depth-0 node hash
let index_hash = Hashed::hash(&elem);
let elem_sum = elem.sum();
let elem_hash = (0u8, &elem_sum, index_hash).hash();
if self.index.contains_key(&index_hash) {
return false;
}
// Special-case the first element
if self.root.is_none() {
self.root = Some(NodeData {
full: true,
node: Node::Leaf(elem_sum),
hash: elem_hash,
depth: 0,
});
self.index.insert(index_hash, 0);
return true;
}
// Next, move the old root out of the structure so that we are allowed to
// move it. We will move a new root back in at the end of the function
let old_root = mem::replace(&mut self.root, None).unwrap();
// Insert into tree, compute new root
let new_node = NodeData {
full: true,
node: Node::Leaf(elem_sum),
hash: elem_hash,
depth: 0,
};
// Put new root in place and record insertion
let index = old_root.n_leaves();
self.root = Some(SumTree::insert_right_of(old_root, new_node));
self.index.insert(index_hash, index);
true
}
fn replace_recurse(node: &mut NodeData<T>, index: usize, new_elem: T) {
assert!(index < (1 << node.depth));
if node.depth == 0 {
assert!(node.full);
node.hash = (0u8, new_elem.sum(), Hashed::hash(&new_elem)).hash();
node.node = Node::Leaf(new_elem.sum());
} else {
match node.node {
Node::Internal {
ref mut lchild,
ref mut rchild,
ref mut sum,
} => {
let bit = index & (1 << (node.depth - 1));
if bit > 0 {
SumTree::replace_recurse(rchild, index - bit, new_elem);
} else {
SumTree::replace_recurse(lchild, index, new_elem);
}
*sum = lchild.sum() + rchild.sum();
node.hash = (node.depth, &*sum, lchild.hash, rchild.hash).hash();
}
// Pruned data would not have been in the index
Node::Pruned(_) => unreachable!(),
Node::Leaf(_) => unreachable!(),
}
}
}
/// Replaces an element in the tree. Returns true if the element existed
/// and was replaced. Returns false if the old element did not exist or
/// if the new element already existed
pub fn replace(&mut self, elem: &T, new_elem: T) -> bool {
let index_hash = Hashed::hash(elem);
let root = match self.root {
Some(ref mut node) => node,
None => {
return false;
}
};
match self.index.remove(&index_hash) {
None => false,
Some(index) => {
let new_index_hash = Hashed::hash(&new_elem);
if self.index.contains_key(&new_index_hash) {
false
} else {
SumTree::replace_recurse(root, index, new_elem);
self.index.insert(new_index_hash, index);
true
}
}
}
}
/// Determine whether an element exists in the tree.
/// If so, return its index
pub fn contains(&self, elem: &T) -> Option<usize> {
let index_hash = Hashed::hash(elem);
self.index.get(&index_hash).map(|x| *x)
}
fn prune_recurse(node: &mut NodeData<T>, index: usize) {
assert!(index < (1 << node.depth));
if node.depth == 0 {
let sum = if let Node::Leaf(ref sum) = node.node {
sum.clone()
} else {
unreachable!()
};
node.node = Node::Pruned(sum);
} else {
let mut prune_me = None;
match node.node {
Node::Internal {
ref mut lchild,
ref mut rchild,
..
} => {
let bit = index & (1 << (node.depth - 1));
if bit > 0 {
SumTree::prune_recurse(rchild, index - bit);
} else {
SumTree::prune_recurse(lchild, index);
}
if let (&Node::Pruned(ref lsum), &Node::Pruned(ref rsum)) =
(&lchild.node, &rchild.node)
{
if node.full {
prune_me = Some(lsum.clone() + rsum.clone());
}
}
}
Node::Pruned(_) => {
// Already pruned. Ok.
}
Node::Leaf(_) => unreachable!(),
}
if let Some(sum) = prune_me {
node.node = Node::Pruned(sum);
}
}
}
/// Removes an element from storage, not affecting the tree
/// Returns true if the element was actually in the tree
pub fn prune(&mut self, elem: &T) -> bool {
let index_hash = Hashed::hash(elem);
let root = match self.root {
Some(ref mut node) => node,
None => {
return false;
}
};
match self.index.remove(&index_hash) {
None => false,
Some(index) => {
SumTree::prune_recurse(root, index);
true
}
}
}
fn clone_pruned_recurse(node: &NodeData<T>) -> NodeData<T> {
if node.full {
// replaces full internal nodes, leaves and already pruned nodes are full
// as well
NodeData {
full: true,
node: Node::Pruned(node.sum()),
hash: node.hash,
depth: node.depth,
}
} else {
if let Node::Internal { ref lchild, ref rchild, ref sum } = node.node {
// just recurse on each side to get the pruned version
NodeData {
full: false,
node: Node::Internal {
lchild: Box::new(SumTree::clone_pruned_recurse(lchild)),
rchild: Box::new(SumTree::clone_pruned_recurse(rchild)),
sum: sum.clone(),
},
hash: node.hash,
depth: node.depth,
}
} else {
unreachable!()
}
}
}
/// Minimal clone of this tree, replacing all full nodes with a pruned node,
/// therefore only copying non-full subtrees.
pub fn clone_pruned(&self) -> SumTree<T> {
match self.root {
Some(ref node) => {
SumTree {
index: HashMap::new(),
root: Some(SumTree::clone_pruned_recurse(node)),
}
},
None => SumTree::new(),
}
}
// TODO push_many, truncate to allow bulk updates
}
// A SumTree is encoded as follows: an empty tree is the single byte 0x00.
// An nonempty tree is encoded recursively by encoding its root node. Each
// node is encoded as follows:
// flag: two bits, 01 for partial, 10 for full, 11 for pruned
// 00 is reserved so that the 0 byte can uniquely specify an empty tree
// depth: six bits, zero indicates a leaf
// hash: 32 bytes
// sum: <length of sum encoding>
//
// For a leaf, this is followed by an encoding of the element. For an
// internal node, the left child is encoded followed by the right child.
// For a pruned internal node, it is followed by nothing.
//
impl<T> Writeable for SumTree<T>
where
T: Summable + Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
match self.root {
None => writer.write_u8(0),
Some(ref node) => node.write(writer),
}
}
}
impl<T> Writeable for NodeData<T>
where
T: Summable + Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
assert!(self.depth < 64);
// Compute depth byte: 0x80 means full, 0xc0 means unpruned
let mut depth = 0;
if self.full {
depth |= 0x80;
}
if let Node::Pruned(_) = self.node {
} else {
depth |= 0xc0;
}
depth |= self.depth;
// Encode node
try!(writer.write_u8(depth));
try!(self.hash.write(writer));
match self.node {
Node::Pruned(ref sum) => sum.write(writer),
Node::Leaf(ref sum) => sum.write(writer),
Node::Internal {
ref lchild,
ref rchild,
ref sum,
} => {
try!(sum.write(writer));
try!(lchild.write(writer));
rchild.write(writer)
}
}
}
}
fn node_read_recurse<T>(
reader: &mut Reader,
index: &mut HashMap<Hash, usize>,
tree_index: &mut usize,
) -> Result<NodeData<T>, ser::Error>
where
T: Summable + Readable + Hashed,
{
// Read depth byte
let depth = try!(reader.read_u8());
let full = depth & 0x80 == 0x80;
let pruned = depth & 0xc0!= 0xc0;
let depth = depth & 0x3f;
// Sanity-check for zero byte
if pruned &&!full {
return Err(ser::Error::CorruptedData);
}
// Read remainder of node
let hash = try!(Readable::read(reader));
let sum = try!(Readable::read(reader));
let data = match (depth, pruned) {
(_, true) => {
*tree_index += 1 << depth as usize;
Node::Pruned(sum)
}
(0, _) => {
index.insert(hash, *tree_index);
*tree_index += 1;
Node::Leaf(sum)
}
(_, _) => {
Node::Internal {
lchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
rchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
sum: sum,
}
}
};
Ok(NodeData {
full: full,
node: data,
hash: hash,
depth: depth,
})
}
impl<T> Readable for SumTree<T>
where
T: Summable + Writeable + Readable + Hashed,
{
fn read(reader: &mut Reader) -> Result<SumTree<T>, ser::Error> {
// Read depth byte of root node
let depth = try!(reader.read_u8());
let full = depth & 0x80 == 0x80;
let pruned = depth & 0xc0!= 0xc0;
let depth = depth & 0x3f;
// Special-case the zero byte
if pruned &&!full {
return Ok(SumTree {
index: HashMap::new(),
root: None,
});
}
// Otherwise continue reading it
let mut index = HashMap::new();
let hash = try!(Readable::read(reader));
let sum = try!(Readable::read(reader));
let data = match (depth, pruned) {
(_, true) => Node::Pruned(sum),
(0, _) => Node::Leaf(sum),
(_, _) => {
let mut tree_index = 0;
Node::Internal {
lchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
rchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
sum: sum,
}
}
};
Ok(SumTree {
index: index,
root: Some(NodeData {
full: full,
node: data,
hash: hash,
depth: depth,
}),
})
}
}
/// This is used to as a scratch space during root calculation so that we can
/// keep everything on the stack in a fixed-size array. It reflects a maximum
/// tree capacity of 2^48, which is not practically reachable.
const MAX_MMR_HEIGHT: usize = 48;
/// This algorithm is based on Peter Todd's in
/// https://github.com/opentimestamps/opentimestamps-server/blob/master/python-opentimestamps/opentimestamps/core/timestamp.py#L324
///
fn compute_peaks<S, I>(iter: I, peaks: &mut [Option<(u8, Hash, S)>])
where
S: Clone + ops::Add<Output = S> + Writeable,
I: Iterator<Item = (u8, Hash, S)>,
{
for peak in peaks.iter_mut() {
*peak = None;
}
for (mut new_depth, mut new_hash, mut new_sum) in iter {
let mut index = 0;
while let Some((old_depth, old_hash, old_sum)) = peaks[index].take() {
// Erase current peak (done by `take()` above), then combine
// it with the new addition, to be inserted one higher
index += 1;
new_depth = old_depth + 1;
new_sum = old_sum.clone() + new_sum.clone();
new_hash = (new_depth, &new_sum, old_hash, new_hash).hash();
}
peaks[index] = Some((new_depth, new_hash, new_sum));
}
}
/// Directly compute the Merkle root of a sum-tree whose contents are given
/// explicitly in the passed iterator.
pub fn compute_root<'a, T, I>(iter: I) -> Option<(Hash, T::Sum)>
where
T: 'a + Summable + Writeable,
I: Iterator<Item = &'a T>,
{
let mut peaks = vec![None; MAX_MMR_HEIGHT];
compute_peaks(
iter.map(|elem| {
let depth = 0u8;
let sum = elem.sum();
let hash = (depth, &sum, Hashed::hash(elem)).hash();
(depth, hash, sum)
}),
&mut peaks,
);
let mut ret = None;
for peak in peaks {
ret = match (peak, ret) {
(None, x) => x,
(Some((_, hash, sum)), None) => Some((hash, sum)),
(Some((depth, lhash, lsum)), Some((rhash, rsum))) => {
let sum = lsum + rsum;
let hash = (depth + 1, &sum, lhash, rhash).hash();
Some((hash, sum))
}
};
}
ret
}
// a couple functions that help debugging
#[allow(dead_code)]
fn print_node<T>(node: &NodeData<T>, tab_level: usize)
where
T: Summable + Writeable,
T::Sum: std::fmt::Debug,
{
for _ in 0..tab_level {
print!(" ");
}
print!("[{:03}] {} {:?}", node.depth, node.hash, node.sum());
match node.node {
Node::Pruned(_) => println!(" X"),
Node::Leaf(_) => println!(" L"),
Node::Internal {
ref lchild,
ref rchild,
..
} => {
println!(":");
print_node(lchild, tab_level + 1);
print_node(rchild, tab_level + 1);
}
}
}
#[allow(dead_code)]
#[allow(missing_docs)]
pub fn print_tree<T>(tree: &SumTree<T>)
where
T: Summable + Writeable,
T::Sum: std::fmt::Debug,
{
match tree.root {
None => println!("[empty tree]"),
Some(ref node) => {
print_node(node, 0);
}
}
}
#[cfg(test)]
mod test {
use rand::{thread_rng, Rng};
use core::hash::Hashed;
use ser;
use super::*;
#[derive(Copy, Clone, Debug)]
struct TestElem([u32; 4]);
impl Summable for TestElem {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
}
}
impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
try!(writer.write_u32(self.0[0]));
try!(writer.write_u32(self.0[1]));
try!(writer.write_u32(self.0[2]));
writer.write_u32(self.0[3])
}
}
fn sumtree_create_(prune: bool) {
let mut tree = SumTree::new();
macro_rules! leaf {
($data: expr) => ({
(0u8, $data.sum(), $data.hash())
})
};
macro_rules! node {
($left: expr, $right: expr) => (
($left.0 + 1, $left.1 + $right.1, $left.hash(), $right.hash())
)
};
macro_rules! prune {
($prune: expr, $tree: expr, $elem: expr) => {
if $prune {
assert_eq!($tree.len(), 1);
$tree.prune(&$elem);
assert_eq!($tree.len(), 0);
// double-pruning shouldn't hurt anything
$tree.prune(&$elem);
assert_eq!($tree.len(), 0);
} else {
assert_eq!($tree.len(), $tree.unpruned_len());
}
}
};
let mut elems = [
TestElem([0, 0, 0, 1]), | TestElem([0, 0, 0, 3]),
TestElem([0, 0, 0, 4]),
TestElem([0, 0, 0, 5]),
TestElem([0, 0, 0, 6]),
TestElem([0, 0, 0, 7]),
TestElem([1, 0, 0, 0]),
];
assert_eq!(tree.root_sum(), None);
assert_eq!(tree.root_sum(), compute_root(elems[0..0].iter()));
assert_eq!(tree.len(), 0);
assert_eq!(tree.contains(&elems[0]), None);
assert!(tree.push(elems[0]));
assert_eq!(tree.contains(&elems[0]), Some(0));
// One element
let expected = leaf!(elems[0]).hash();
assert_eq!(tree.root_sum(), Some((expected, 1)));
assert_eq!(tree.root_sum(), compute_root(elems[0..1].iter()));
assert_eq!(tree.unpruned_len(), 1);
prune!(prune, tree, elems[0]);
// Two elements
assert_eq!(tree.contains(&elems[1]), None);
assert!(tree.push(elems[1]));
assert_eq!(tree.contains(&elems[1]), Some(1));
let expected = node!(leaf!(elems[0]), leaf!(elems[1])).hash();
assert_eq!(tree.root_sum(), Some((expected, 3)));
assert_eq!(tree.root_sum(), compute_root(elems[0..2].iter()));
assert_eq!(tree.unpruned_len(), 2);
prune!(prune, tree, elems[1]);
// Three elements
assert_eq!(tree.contains(&elems[2]), None);
assert!(tree.push(elems[2]));
assert_eq!(tree.contains(&elems[2]), Some(2));
let expected = node!(node!(leaf!(elems[0]), leaf!(elems[1])), leaf!(elems[2])).hash();
assert_eq!(tree.root_sum(), Some((expected, 6)));
assert_eq!(tree.root_sum(), compute_root(elems[0..3].iter()));
assert_eq!(tree.unpruned_len(), 3);
prune!(prune, tree, elems[2]);
// Four elements
assert_eq!(tree.contains(&elems[3]), None);
assert!(tree.push(elems[3]));
assert_eq!(tree.contains(&elems[3]), Some(3));
let expected = node!(
node!(leaf!(elems[0]), leaf!(elems[1])),
node!(leaf!(elems[2]), leaf!(elems[3]))
).hash();
assert_eq!(tree.root_sum(), Some((expected, 10)));
assert_eq!(tree.root_sum(), compute_root(elems[0..4].iter()));
assert_eq!(tree.unpruned_len(), 4);
prune!(prune, tree, elems[3]);
// Five elements
assert_eq!(tree.contains(&elems[4]), None);
assert!(tree.push(elems[4]));
assert_eq!(tree.contains(&elems[4]), Some(4));
let expected = node!(
node!(
node!(leaf!(elems[0]), leaf!(elems[1])),
node!(leaf!(elems[2]), leaf!(elems[3]))
),
leaf!(elems[4])
).hash();
assert_eq!(tree.root_sum(), Some((expected, 15)));
assert_eq!(tree.root_sum(), compute_root(elems[0..5].iter()));
assert_eq!(tree.unpruned_len(), 5);
prune!(prune, tree, elems[4]);
// Six elements
assert_eq!(tree.contains(&elems[5]), None);
assert!(tree.push(elems[5]));
assert_eq!(tree.contains(&elems[5]), Some(5));
let expected = node!(
node!(
node!(leaf!(elems[0]), leaf!(elems[1])),
node!(leaf!(elems[2]), leaf!(elems[3]))
),
node!(leaf!(elems[4]), leaf!(elems[5]))
).hash();
assert_eq!(tree.root_sum(), Some((expected, 21)));
assert_eq!(tree.root_sum(), compute_root( | TestElem([0, 0, 0, 2]), | random_line_split |
ply.rs | file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::color;
use crate::errors::*;
use crate::Point;
use byteorder::{ByteOrder, LittleEndian};
use cgmath::Vector3;
use num_traits::identities::Zero;
use std::fs::File;
use std::io::{BufRead, BufReader, Seek, SeekFrom};
use std::ops::Index;
use std::path::Path;
use std::str;
#[derive(Debug)]
struct | {
format: Format,
elements: Vec<Element>,
offset: Vector3<f64>,
}
/// Scalar types a PLY property can carry. Covers both the classic names
/// ("char", "float", ...) and the sized spellings ("int8", "float32", ...)
/// accepted by `DataType::from_str`.
#[derive(Debug, Copy, Clone, PartialEq)]
enum DataType {
    Int8,
    Uint8,
    Int16,
    Uint16,
    Int32,
    Uint32,
    Float32,
    Float64,
}
impl DataType {
    /// Maps a PLY property type name to its `DataType`.
    ///
    /// Both the traditional spellings ("char", "ushort", "double", ...) and
    /// the explicitly sized ones ("int8", "uint16", "float64", ...) are
    /// accepted; anything else yields an `InvalidInput` error.
    fn from_str(input: &str) -> Result<Self> {
        let data_type = match input {
            "float" | "float32" => DataType::Float32,
            "double" | "float64" => DataType::Float64,
            "char" | "int8" => DataType::Int8,
            "uchar" | "uint8" => DataType::Uint8,
            "short" | "int16" => DataType::Int16,
            "ushort" | "uint16" => DataType::Uint16,
            "int" | "int32" => DataType::Int32,
            "uint" | "uint32" => DataType::Uint32,
            _ => {
                return Err(ErrorKind::InvalidInput(format!("Invalid data type: {}", input)).into())
            }
        };
        Ok(data_type)
    }
}
impl Header {
    /// Returns true when the header declares an element with this name.
    fn has_element(&self, name: &str) -> bool {
        for element in &self.elements {
            if element.name == name {
                return true;
            }
        }
        false
    }
}
impl<'a> Index<&'a str> for Header {
    type Output = Element;

    /// Looks up an element by name.
    ///
    /// Panics when no element with that name exists, matching the usual
    /// `Index` contract.
    fn index(&self, name: &'a str) -> &Self::Output {
        self.elements
            .iter()
            .find(|element| element.name == name)
            .unwrap_or_else(|| panic!("Element {} does not exist.", name))
    }
}
/// Payload encoding declared by the header's `format` line (version 1.0 only).
#[derive(Debug, PartialEq)]
enum Format {
    BinaryLittleEndianV1,
    BinaryBigEndianV1,
    AsciiV1,
}
// TODO(hrapp): Maybe support list properties too?
/// A single non-list property of a PLY element, e.g. `property float x`.
#[derive(Debug)]
struct ScalarProperty {
    name: String,
    data_type: DataType,
}
/// One `element` declaration from the header, e.g. `element vertex 1200`.
#[derive(Debug)]
struct Element {
    // Element name as it appears in the header, e.g. "vertex".
    name: String,
    // Declared number of instances of this element in the payload.
    count: i64,
    // Scalar properties in declaration order; list properties are skipped
    // by the header parser.
    properties: Vec<ScalarProperty>,
}
impl<'a> Index<&'a str> for Element {
    type Output = ScalarProperty;

    /// Looks up a scalar property of this element by name.
    ///
    /// Panics when the property does not exist.
    fn index(&self, name: &'a str) -> &Self::Output {
        self.properties
            .iter()
            .find(|property| property.name == name)
            .expect("Property does not exist!")
    }
}
/// Parses a PLY header from `reader`.
///
/// Returns the parsed `Header` together with the header's length in bytes so
/// the caller can seek directly to the start of the binary payload.
///
/// Errors with `InvalidInput` on any malformed header line. Fix over the
/// original: blank lines, bare `property` lines, and hitting EOF before
/// `end_header` now return errors instead of panicking on out-of-bounds
/// indexing into the split line.
fn parse_header<R: BufRead>(reader: &mut R) -> Result<(Header, usize)> {
    use crate::errors::ErrorKind::InvalidInput;

    let mut header_len = 0;
    let mut line = String::new();
    header_len += reader.read_line(&mut line)?;
    if line.trim() != "ply" {
        return Err(InvalidInput("Not a PLY file".to_string()).into());
    }
    let mut format = None;
    let mut current_element = None;
    let mut offset = Vector3::zero();
    let mut elements = Vec::new();
    loop {
        line.clear();
        header_len += reader.read_line(&mut line)?;
        let entries: Vec<&str> = line.trim().split_whitespace().collect();
        // Blank lines — and EOF before 'end_header', which reads zero bytes
        // and leaves the line empty — would otherwise panic on 'entries[0]'.
        if entries.is_empty() {
            return Err(InvalidInput(format!("Invalid line: {}", line)).into());
        }
        match entries[0] {
            "format" if entries.len() == 3 => {
                if entries[2] != "1.0" {
                    return Err(InvalidInput(format!("Invalid version: {}", entries[2])).into());
                }
                format = Some(match entries[1] {
                    "ascii" => Format::AsciiV1,
                    "binary_little_endian" => Format::BinaryLittleEndianV1,
                    "binary_big_endian" => Format::BinaryBigEndianV1,
                    _ => return Err(InvalidInput(format!("Invalid format: {}", entries[1])).into()),
                });
            }
            "element" if entries.len() == 3 => {
                // A new element declaration closes out the previous one.
                if let Some(element) = current_element.take() {
                    elements.push(element);
                }
                current_element = Some(Element {
                    name: entries[1].to_string(),
                    count: entries[2]
                        .parse::<i64>()
                        .chain_err(|| InvalidInput(format!("Invalid count: {}", entries[2])))?,
                    properties: Vec::new(),
                });
            }
            "property" => {
                if current_element.is_none() {
                    return Err(
                        InvalidInput(format!("property outside of element: {}", line)).into(),
                    );
                };
                // A bare "property" line would panic on 'entries[1]' below.
                if entries.len() < 2 {
                    return Err(InvalidInput(format!("Invalid line: {}", line)).into());
                }
                let property = match entries[1] {
                    "list" if entries.len() == 5 => {
                        // We do not support list properties.
                        continue;
                    }
                    data_type_str if entries.len() == 3 => {
                        let data_type = DataType::from_str(data_type_str)?;
                        ScalarProperty {
                            name: entries[2].to_string(),
                            data_type,
                        }
                    }
                    _ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
                };
                current_element.as_mut().unwrap().properties.push(property);
            }
            "end_header" => break,
            "comment" => {
                // 'comment offset: x y z' carries a global translation;
                // every other comment is ignored.
                if entries.len() == 5 && entries[1] == "offset:" {
                    let x = entries[2]
                        .parse::<f64>()
                        .chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[2])))?;
                    let y = entries[3]
                        .parse::<f64>()
                        .chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[3])))?;
                    let z = entries[4]
                        .parse::<f64>()
                        .chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[4])))?;
                    offset = Vector3::new(x, y, z)
                }
            }
            _ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
        }
    }
    if let Some(element) = current_element {
        elements.push(element);
    }
    if format.is_none() {
        return Err(InvalidInput("No format specified".into()).into());
    }
    Ok((
        Header {
            elements,
            format: format.unwrap(),
            offset,
        },
        header_len,
    ))
}
/// Reads one property value out of `buf`, stores it into `val`, and advances
/// `nread` by the number of bytes consumed.
type ReadingFn = fn(nread: &mut usize, buf: &[u8], val: &mut Point);
// The two macros create a 'ReadingFn' that reads a value of '$data_type' out of a reader, and
// calls '$assign' with it while casting it to the correct type. I did not find a way of doing this
// purely using generic programming, so I resorted to this macro.
macro_rules! create_and_return_reading_fn {
    ($assign:expr, $size:ident, $num_bytes:expr, $reading_fn:expr) => {{
        // Accumulate the per-point byte footprint at expansion time so the
        // caller knows how many bytes each point occupies on disk.
        $size += $num_bytes;
        |nread: &mut usize, buf: &[u8], point: &mut Point| {
            // The 'as _' lets the closure coerce the raw value to whatever
            // type '$assign' expects (f64 position or u8 color channel).
            #[allow(clippy::cast_lossless)]
            $assign(point, $reading_fn(buf) as _);
            *nread += $num_bytes;
        }
    }};
}
// Expands to a 'ReadingFn' that decodes one little-endian scalar of
// '$data_type' and hands it to '$assign', adding the scalar's byte width to
// '$size'.
macro_rules! read_casted_property {
    ($data_type:expr, $assign:expr, &mut $size:ident) => {
        match $data_type {
            DataType::Uint8 => {
                create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0])
            }
            // Reinterpret the byte as i8 before the final cast so negative
            // values are sign-extended; the original read 'buf[0]' as u8,
            // turning e.g. an int8 of -1 into 255 when assigned to an f64.
            DataType::Int8 => {
                create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0] as i8)
            }
            DataType::Uint16 => {
                create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_u16)
            }
            DataType::Int16 => {
                create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_i16)
            }
            DataType::Uint32 => {
                create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_u32)
            }
            DataType::Int32 => {
                create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_i32)
            }
            DataType::Float32 => {
                create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_f32)
            }
            DataType::Float64 => {
                create_and_return_reading_fn!($assign, $size, 8, LittleEndian::read_f64)
            }
        }
    };
}
// Similar to 'create_and_return_reading_fn', but creates a function that just advances the read
// pointer.
// Used for properties we recognize but do not store in 'Point'.
macro_rules! create_skip_fn {
    (&mut $size:ident, $num_bytes:expr) => {{
        // Still counts toward the per-point byte footprint even though the
        // bytes are discarded.
        $size += $num_bytes;
        fn _read_fn(nread: &mut usize, _: &[u8], _: &mut Point) {
            *nread += $num_bytes;
        }
        _read_fn
    }};
}
/// Abstraction to read binary points from ply files into points.
pub struct PlyIterator {
reader: BufReader<File>,
readers: Vec<ReadingFn>,
pub num_total_points: i64,
offset: Vector3<f64>,
point_count: usize,
}
impl PlyIterator {
pub fn from_file<P: AsRef<Path>>(ply_file: P) -> Result<Self> {
let mut file = File::open(ply_file).chain_err(|| "Could not open input file.")?;
let mut reader = BufReader::new(file);
let (header, header_len) = parse_header(&mut reader)?;
file = reader.into_inner();
file.seek(SeekFrom::Start(header_len as u64))?;
if!header.has_element("vertex") {
panic!("Header does not have element'vertex'");
}
if header.format!= Format::BinaryLittleEndianV1 {
panic!("Unsupported PLY format: {:?}", header.format);
}
let vertex = &header["vertex"];
let mut seen_x = false;
let mut seen_y = false;
let mut seen_z = false;
let mut readers: Vec<ReadingFn> = Vec::new();
let mut num_bytes_per_point = 0;
for prop in &vertex.properties {
match &prop.name as &str {
"x" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.x = val,
&mut num_bytes_per_point
));
seen_x = true;
}
"y" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.y = val,
&mut num_bytes_per_point
));
seen_y = true;
}
"z" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.z = val,
&mut num_bytes_per_point
));
seen_z = true;
}
"r" | "red" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.red = val,
&mut num_bytes_per_point
));
}
"g" | "green" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.green = val,
&mut num_bytes_per_point
));
}
"b" | "blue" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.blue = val,
&mut num_bytes_per_point
));
}
"intensity" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val| p.intensity = Some(val),
&mut num_bytes_per_point
));
}
other => {
println!("Will ignore property '{}' on'vertex'.", other);
use self::DataType::*;
match prop.data_type {
Uint8 | Int8 => readers.push(create_skip_fn!(&mut num_bytes_per_point, 1)),
Uint16 | Int16 => {
readers.push(create_skip_fn!(&mut num_bytes_per_point, 2))
}
Uint32 | Int32 | Float32 => {
readers.push(create_skip_fn!(&mut num_bytes_per_point, 4))
}
Float64 => readers.push(create_skip_fn!(&mut num_bytes_per_point, 8)),
}
}
}
}
if!seen_x ||!seen_y ||!seen_z {
panic!("PLY must contain properties 'x', 'y', 'z' for'vertex'.");
}
// We align the buffer of this 'BufReader' to points, so that we can index this buffer and know
// that it will always contain full points to parse.
Ok(PlyIterator {
reader: BufReader::with_capacity(num_bytes_per_point * 1024, file),
readers,
num_total_points: header["vertex"].count,
offset: header.offset,
point_count: 0,
})
}
}
impl Iterator for PlyIterator {
type Item = Point;
fn size_hint(&self) -> (usize, Option<usize>) {
let size = self.num_total_points as usize;
(size, Some(size))
}
fn next(&mut self) -> Option<Point> {
if self.point_count == self.num_total_points as usize {
return None;
}
let mut point = Point {
position: Vector3::zero(),
color: color::WHITE.to_u8(),
intensity: None,
};
let mut nread = 0;
// We made sure before that the internal buffer of'reader' is aligned to the number of
// bytes for a single point, therefore we can access it here and know that we can always
// read into it and are sure that it contains at least a full point.
{
let buf = self.reader.fill_buf().unwrap();
for r in &self.readers {
let cnread = nread;
r(&mut nread, &buf[cnread..], &mut point);
}
}
point.position += self.offset;
self.reader.consume(nread);
self.point_count += 1;
Some(point)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn points_from_file<P: AsRef<Path>>(path: P) -> Vec<Point> {
let iterator = PlyIterator::from_file(path).unwrap();
let mut points = Vec::new();
iterator.for_each(|p| {
points.push(p);
});
points
}
#[test]
fn test_xyz_f32_rgb_u8_le() {
let points = points_from_file("src/test_data/xyz_f32_rgb_u8_le.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert_eq!(points[7].position.x, 22.);
assert_eq!(points[0].color.red, 255);
assert_eq!(points[7].color.red, 234);
}
#[test]
fn test_xyz_f32_rgba_u8_le() {
let points = points_from_file("src/test_data/xyz_f32_rgba_u8_le.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert_eq!(points[7].position.x, 22.);
assert_eq!(points[0].color.red, 255);
assert_eq!(points[7].color.red, 227);
}
#[test]
fn test_xyz_f32_rgb_u8_intensity_f32_le() {
// All intensities in this file are NaN, but set.
let points = points_from_file("src/test_data/xyz_f32_rgb_u8_intensity_f32.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert!(points[0].intensity.is_some());
assert_eq!(points[7].position | Header | identifier_name |
ply.rs | this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::color;
use crate::errors::*;
use crate::Point;
use byteorder::{ByteOrder, LittleEndian};
use cgmath::Vector3;
use num_traits::identities::Zero;
use std::fs::File;
use std::io::{BufRead, BufReader, Seek, SeekFrom};
use std::ops::Index;
use std::path::Path;
use std::str;
#[derive(Debug)]
struct Header {
format: Format,
elements: Vec<Element>,
offset: Vector3<f64>,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum DataType {
Int8,
Uint8,
Int16,
Uint16,
Int32,
Uint32,
Float32,
Float64,
}
impl DataType {
fn from_str(input: &str) -> Result<Self> {
match input {
"float" | "float32" => Ok(DataType::Float32),
"double" | "float64" => Ok(DataType::Float64),
"char" | "int8" => Ok(DataType::Int8),
"uchar" | "uint8" => Ok(DataType::Uint8),
"short" | "int16" => Ok(DataType::Int16),
"ushort" | "uint16" => Ok(DataType::Uint16),
"int" | "int32" => Ok(DataType::Int32),
"uint" | "uint32" => Ok(DataType::Uint32),
_ => Err(ErrorKind::InvalidInput(format!("Invalid data type: {}", input)).into()),
}
}
}
impl Header {
fn has_element(&self, name: &str) -> bool {
self.elements.iter().any(|e| e.name == name)
}
}
impl<'a> Index<&'a str> for Header {
type Output = Element;
fn index(&self, name: &'a str) -> &Self::Output {
for element in &self.elements {
if element.name == name {
return element;
}
}
panic!("Element {} does not exist.", name);
}
}
#[derive(Debug, PartialEq)]
enum Format {
BinaryLittleEndianV1,
BinaryBigEndianV1,
AsciiV1,
}
// TODO(hrapp): Maybe support list properties too?
#[derive(Debug)]
struct ScalarProperty {
name: String,
data_type: DataType,
}
#[derive(Debug)]
struct Element {
name: String,
count: i64,
properties: Vec<ScalarProperty>,
}
impl<'a> Index<&'a str> for Element {
type Output = ScalarProperty;
fn index(&self, name: &'a str) -> &Self::Output {
for p in &self.properties {
if p.name == name {
return p;
}
}
panic!("Property does not exist!")
}
}
fn parse_header<R: BufRead>(reader: &mut R) -> Result<(Header, usize)> {
use crate::errors::ErrorKind::InvalidInput;
let mut header_len = 0;
let mut line = String::new();
header_len += reader.read_line(&mut line)?;
if line.trim()!= "ply" {
return Err(InvalidInput("Not a PLY file".to_string()).into());
}
let mut format = None;
let mut current_element = None;
let mut offset = Vector3::zero();
let mut elements = Vec::new();
loop {
line.clear();
header_len += reader.read_line(&mut line)?;
let entries: Vec<&str> = line.trim().split_whitespace().collect();
match entries[0] {
"format" if entries.len() == 3 => {
if entries[2]!= "1.0" {
return Err(InvalidInput(format!("Invalid version: {}", entries[2])).into());
}
format = Some(match entries[1] {
"ascii" => Format::AsciiV1,
"binary_little_endian" => Format::BinaryLittleEndianV1,
"binary_big_endian" => Format::BinaryBigEndianV1,
_ => return Err(InvalidInput(format!("Invalid format: {}", entries[1])).into()),
});
}
"element" if entries.len() == 3 => {
if let Some(element) = current_element.take() {
elements.push(element);
}
current_element = Some(Element {
name: entries[1].to_string(),
count: entries[2]
.parse::<i64>()
.chain_err(|| InvalidInput(format!("Invalid count: {}", entries[2])))?,
properties: Vec::new(),
});
}
"property" => {
if current_element.is_none() {
return Err(
InvalidInput(format!("property outside of element: {}", line)).into(),
);
};
let property = match entries[1] {
"list" if entries.len() == 5 => {
// We do not support list properties.
continue;
}
data_type_str if entries.len() == 3 => {
let data_type = DataType::from_str(data_type_str)?;
ScalarProperty {
name: entries[2].to_string(),
data_type,
}
}
_ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
};
current_element.as_mut().unwrap().properties.push(property);
}
"end_header" => break,
"comment" => {
if entries.len() == 5 && entries[1] == "offset:" {
let x = entries[2]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[2])))?;
let y = entries[3]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[3])))?;
let z = entries[4]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[4])))?;
offset = Vector3::new(x, y, z)
}
}
_ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
}
}
if let Some(element) = current_element {
elements.push(element);
}
if format.is_none() {
return Err(InvalidInput("No format specified".into()).into());
}
Ok((
Header {
elements,
format: format.unwrap(),
offset,
},
header_len,
))
}
type ReadingFn = fn(nread: &mut usize, buf: &[u8], val: &mut Point);
// The two macros create a 'ReadingFn' that reads a value of '$data_type' out of a reader, and
// calls '$assign' with it while casting it to the correct type. I did not find a way of doing this
// purely using generic programming, so I resorted to this macro.
macro_rules! create_and_return_reading_fn {
($assign:expr, $size:ident, $num_bytes:expr, $reading_fn:expr) => {{
$size += $num_bytes;
|nread: &mut usize, buf: &[u8], point: &mut Point| {
#[allow(clippy::cast_lossless)]
$assign(point, $reading_fn(buf) as _);
*nread += $num_bytes;
}
}};
}
macro_rules! read_casted_property {
($data_type:expr, $assign:expr, &mut $size:ident) => {
match $data_type {
DataType::Uint8 => {
create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0])
}
DataType::Int8 => create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0]),
DataType::Uint16 => {
create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_u16)
}
DataType::Int16 => {
create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_i16)
}
DataType::Uint32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_u32)
}
DataType::Int32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_i32)
}
DataType::Float32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_f32) | create_and_return_reading_fn!($assign, $size, 8, LittleEndian::read_f64)
}
}
};
}
// Similar to 'create_and_return_reading_fn', but creates a function that just advances the read
// pointer.
macro_rules! create_skip_fn {
(&mut $size:ident, $num_bytes:expr) => {{
$size += $num_bytes;
fn _read_fn(nread: &mut usize, _: &[u8], _: &mut Point) {
*nread += $num_bytes;
}
_read_fn
}};
}
/// Abstraction to read binary points from ply files into points.
pub struct PlyIterator {
reader: BufReader<File>,
readers: Vec<ReadingFn>,
pub num_total_points: i64,
offset: Vector3<f64>,
point_count: usize,
}
impl PlyIterator {
pub fn from_file<P: AsRef<Path>>(ply_file: P) -> Result<Self> {
let mut file = File::open(ply_file).chain_err(|| "Could not open input file.")?;
let mut reader = BufReader::new(file);
let (header, header_len) = parse_header(&mut reader)?;
file = reader.into_inner();
file.seek(SeekFrom::Start(header_len as u64))?;
if!header.has_element("vertex") {
panic!("Header does not have element'vertex'");
}
if header.format!= Format::BinaryLittleEndianV1 {
panic!("Unsupported PLY format: {:?}", header.format);
}
let vertex = &header["vertex"];
let mut seen_x = false;
let mut seen_y = false;
let mut seen_z = false;
let mut readers: Vec<ReadingFn> = Vec::new();
let mut num_bytes_per_point = 0;
for prop in &vertex.properties {
match &prop.name as &str {
"x" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.x = val,
&mut num_bytes_per_point
));
seen_x = true;
}
"y" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.y = val,
&mut num_bytes_per_point
));
seen_y = true;
}
"z" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.z = val,
&mut num_bytes_per_point
));
seen_z = true;
}
"r" | "red" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.red = val,
&mut num_bytes_per_point
));
}
"g" | "green" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.green = val,
&mut num_bytes_per_point
));
}
"b" | "blue" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.blue = val,
&mut num_bytes_per_point
));
}
"intensity" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val| p.intensity = Some(val),
&mut num_bytes_per_point
));
}
other => {
println!("Will ignore property '{}' on'vertex'.", other);
use self::DataType::*;
match prop.data_type {
Uint8 | Int8 => readers.push(create_skip_fn!(&mut num_bytes_per_point, 1)),
Uint16 | Int16 => {
readers.push(create_skip_fn!(&mut num_bytes_per_point, 2))
}
Uint32 | Int32 | Float32 => {
readers.push(create_skip_fn!(&mut num_bytes_per_point, 4))
}
Float64 => readers.push(create_skip_fn!(&mut num_bytes_per_point, 8)),
}
}
}
}
if!seen_x ||!seen_y ||!seen_z {
panic!("PLY must contain properties 'x', 'y', 'z' for'vertex'.");
}
// We align the buffer of this 'BufReader' to points, so that we can index this buffer and know
// that it will always contain full points to parse.
Ok(PlyIterator {
reader: BufReader::with_capacity(num_bytes_per_point * 1024, file),
readers,
num_total_points: header["vertex"].count,
offset: header.offset,
point_count: 0,
})
}
}
impl Iterator for PlyIterator {
type Item = Point;
fn size_hint(&self) -> (usize, Option<usize>) {
let size = self.num_total_points as usize;
(size, Some(size))
}
fn next(&mut self) -> Option<Point> {
if self.point_count == self.num_total_points as usize {
return None;
}
let mut point = Point {
position: Vector3::zero(),
color: color::WHITE.to_u8(),
intensity: None,
};
let mut nread = 0;
// We made sure before that the internal buffer of'reader' is aligned to the number of
// bytes for a single point, therefore we can access it here and know that we can always
// read into it and are sure that it contains at least a full point.
{
let buf = self.reader.fill_buf().unwrap();
for r in &self.readers {
let cnread = nread;
r(&mut nread, &buf[cnread..], &mut point);
}
}
point.position += self.offset;
self.reader.consume(nread);
self.point_count += 1;
Some(point)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn points_from_file<P: AsRef<Path>>(path: P) -> Vec<Point> {
let iterator = PlyIterator::from_file(path).unwrap();
let mut points = Vec::new();
iterator.for_each(|p| {
points.push(p);
});
points
}
#[test]
fn test_xyz_f32_rgb_u8_le() {
let points = points_from_file("src/test_data/xyz_f32_rgb_u8_le.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert_eq!(points[7].position.x, 22.);
assert_eq!(points[0].color.red, 255);
assert_eq!(points[7].color.red, 234);
}
#[test]
fn test_xyz_f32_rgba_u8_le() {
let points = points_from_file("src/test_data/xyz_f32_rgba_u8_le.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert_eq!(points[7].position.x, 22.);
assert_eq!(points[0].color.red, 255);
assert_eq!(points[7].color.red, 227);
}
#[test]
fn test_xyz_f32_rgb_u8_intensity_f32_le() {
// All intensities in this file are NaN, but set.
let points = points_from_file("src/test_data/xyz_f32_rgb_u8_intensity_f32.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert!(points[0].intensity.is_some());
assert_eq!(points[7].position.x | }
DataType::Float64 => { | random_line_split |
ply.rs | file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::color;
use crate::errors::*;
use crate::Point;
use byteorder::{ByteOrder, LittleEndian};
use cgmath::Vector3;
use num_traits::identities::Zero;
use std::fs::File;
use std::io::{BufRead, BufReader, Seek, SeekFrom};
use std::ops::Index;
use std::path::Path;
use std::str;
#[derive(Debug)]
struct Header {
format: Format,
elements: Vec<Element>,
offset: Vector3<f64>,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum DataType {
Int8,
Uint8,
Int16,
Uint16,
Int32,
Uint32,
Float32,
Float64,
}
impl DataType {
fn from_str(input: &str) -> Result<Self> {
match input {
"float" | "float32" => Ok(DataType::Float32),
"double" | "float64" => Ok(DataType::Float64),
"char" | "int8" => Ok(DataType::Int8),
"uchar" | "uint8" => Ok(DataType::Uint8),
"short" | "int16" => Ok(DataType::Int16),
"ushort" | "uint16" => Ok(DataType::Uint16),
"int" | "int32" => Ok(DataType::Int32),
"uint" | "uint32" => Ok(DataType::Uint32),
_ => Err(ErrorKind::InvalidInput(format!("Invalid data type: {}", input)).into()),
}
}
}
impl Header {
fn has_element(&self, name: &str) -> bool {
self.elements.iter().any(|e| e.name == name)
}
}
impl<'a> Index<&'a str> for Header {
type Output = Element;
fn index(&self, name: &'a str) -> &Self::Output {
for element in &self.elements {
if element.name == name {
return element;
}
}
panic!("Element {} does not exist.", name);
}
}
#[derive(Debug, PartialEq)]
enum Format {
BinaryLittleEndianV1,
BinaryBigEndianV1,
AsciiV1,
}
// TODO(hrapp): Maybe support list properties too?
#[derive(Debug)]
struct ScalarProperty {
name: String,
data_type: DataType,
}
#[derive(Debug)]
struct Element {
name: String,
count: i64,
properties: Vec<ScalarProperty>,
}
impl<'a> Index<&'a str> for Element {
type Output = ScalarProperty;
fn index(&self, name: &'a str) -> &Self::Output {
for p in &self.properties {
if p.name == name {
return p;
}
}
panic!("Property does not exist!")
}
}
fn parse_header<R: BufRead>(reader: &mut R) -> Result<(Header, usize)> {
use crate::errors::ErrorKind::InvalidInput;
let mut header_len = 0;
let mut line = String::new();
header_len += reader.read_line(&mut line)?;
if line.trim()!= "ply" {
return Err(InvalidInput("Not a PLY file".to_string()).into());
}
let mut format = None;
let mut current_element = None;
let mut offset = Vector3::zero();
let mut elements = Vec::new();
loop {
line.clear();
header_len += reader.read_line(&mut line)?;
let entries: Vec<&str> = line.trim().split_whitespace().collect();
match entries[0] {
"format" if entries.len() == 3 => {
if entries[2]!= "1.0" {
return Err(InvalidInput(format!("Invalid version: {}", entries[2])).into());
}
format = Some(match entries[1] {
"ascii" => Format::AsciiV1,
"binary_little_endian" => Format::BinaryLittleEndianV1,
"binary_big_endian" => Format::BinaryBigEndianV1,
_ => return Err(InvalidInput(format!("Invalid format: {}", entries[1])).into()),
});
}
"element" if entries.len() == 3 => {
if let Some(element) = current_element.take() {
elements.push(element);
}
current_element = Some(Element {
name: entries[1].to_string(),
count: entries[2]
.parse::<i64>()
.chain_err(|| InvalidInput(format!("Invalid count: {}", entries[2])))?,
properties: Vec::new(),
});
}
"property" => | current_element.as_mut().unwrap().properties.push(property);
}
"end_header" => break,
"comment" => {
if entries.len() == 5 && entries[1] == "offset:" {
let x = entries[2]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[2])))?;
let y = entries[3]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[3])))?;
let z = entries[4]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[4])))?;
offset = Vector3::new(x, y, z)
}
}
_ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
}
}
if let Some(element) = current_element {
elements.push(element);
}
if format.is_none() {
return Err(InvalidInput("No format specified".into()).into());
}
Ok((
Header {
elements,
format: format.unwrap(),
offset,
},
header_len,
))
}
type ReadingFn = fn(nread: &mut usize, buf: &[u8], val: &mut Point);
// The two macros create a 'ReadingFn' that reads a value of '$data_type' out of a reader, and
// calls '$assign' with it while casting it to the correct type. I did not find a way of doing this
// purely using generic programming, so I resorted to this macro.
macro_rules! create_and_return_reading_fn {
($assign:expr, $size:ident, $num_bytes:expr, $reading_fn:expr) => {{
$size += $num_bytes;
|nread: &mut usize, buf: &[u8], point: &mut Point| {
#[allow(clippy::cast_lossless)]
$assign(point, $reading_fn(buf) as _);
*nread += $num_bytes;
}
}};
}
macro_rules! read_casted_property {
($data_type:expr, $assign:expr, &mut $size:ident) => {
match $data_type {
DataType::Uint8 => {
create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0])
}
DataType::Int8 => create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0]),
DataType::Uint16 => {
create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_u16)
}
DataType::Int16 => {
create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_i16)
}
DataType::Uint32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_u32)
}
DataType::Int32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_i32)
}
DataType::Float32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_f32)
}
DataType::Float64 => {
create_and_return_reading_fn!($assign, $size, 8, LittleEndian::read_f64)
}
}
};
}
// Similar to 'create_and_return_reading_fn', but creates a function that just advances the read
// pointer.
macro_rules! create_skip_fn {
(&mut $size:ident, $num_bytes:expr) => {{
$size += $num_bytes;
fn _read_fn(nread: &mut usize, _: &[u8], _: &mut Point) {
*nread += $num_bytes;
}
_read_fn
}};
}
/// Abstraction to read binary points from ply files into points.
pub struct PlyIterator {
reader: BufReader<File>,
readers: Vec<ReadingFn>,
pub num_total_points: i64,
offset: Vector3<f64>,
point_count: usize,
}
impl PlyIterator {
pub fn from_file<P: AsRef<Path>>(ply_file: P) -> Result<Self> {
let mut file = File::open(ply_file).chain_err(|| "Could not open input file.")?;
let mut reader = BufReader::new(file);
let (header, header_len) = parse_header(&mut reader)?;
file = reader.into_inner();
file.seek(SeekFrom::Start(header_len as u64))?;
if!header.has_element("vertex") {
panic!("Header does not have element'vertex'");
}
if header.format!= Format::BinaryLittleEndianV1 {
panic!("Unsupported PLY format: {:?}", header.format);
}
let vertex = &header["vertex"];
let mut seen_x = false;
let mut seen_y = false;
let mut seen_z = false;
let mut readers: Vec<ReadingFn> = Vec::new();
let mut num_bytes_per_point = 0;
for prop in &vertex.properties {
match &prop.name as &str {
"x" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.x = val,
&mut num_bytes_per_point
));
seen_x = true;
}
"y" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.y = val,
&mut num_bytes_per_point
));
seen_y = true;
}
"z" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.z = val,
&mut num_bytes_per_point
));
seen_z = true;
}
"r" | "red" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.red = val,
&mut num_bytes_per_point
));
}
"g" | "green" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.green = val,
&mut num_bytes_per_point
));
}
"b" | "blue" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: u8| p.color.blue = val,
&mut num_bytes_per_point
));
}
"intensity" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val| p.intensity = Some(val),
&mut num_bytes_per_point
));
}
other => {
println!("Will ignore property '{}' on'vertex'.", other);
use self::DataType::*;
match prop.data_type {
Uint8 | Int8 => readers.push(create_skip_fn!(&mut num_bytes_per_point, 1)),
Uint16 | Int16 => {
readers.push(create_skip_fn!(&mut num_bytes_per_point, 2))
}
Uint32 | Int32 | Float32 => {
readers.push(create_skip_fn!(&mut num_bytes_per_point, 4))
}
Float64 => readers.push(create_skip_fn!(&mut num_bytes_per_point, 8)),
}
}
}
}
if!seen_x ||!seen_y ||!seen_z {
panic!("PLY must contain properties 'x', 'y', 'z' for'vertex'.");
}
// We align the buffer of this 'BufReader' to points, so that we can index this buffer and know
// that it will always contain full points to parse.
Ok(PlyIterator {
reader: BufReader::with_capacity(num_bytes_per_point * 1024, file),
readers,
num_total_points: header["vertex"].count,
offset: header.offset,
point_count: 0,
})
}
}
impl Iterator for PlyIterator {
type Item = Point;
fn size_hint(&self) -> (usize, Option<usize>) {
let size = self.num_total_points as usize;
(size, Some(size))
}
fn next(&mut self) -> Option<Point> {
if self.point_count == self.num_total_points as usize {
return None;
}
let mut point = Point {
position: Vector3::zero(),
color: color::WHITE.to_u8(),
intensity: None,
};
let mut nread = 0;
// We made sure before that the internal buffer of'reader' is aligned to the number of
// bytes for a single point, therefore we can access it here and know that we can always
// read into it and are sure that it contains at least a full point.
{
let buf = self.reader.fill_buf().unwrap();
for r in &self.readers {
let cnread = nread;
r(&mut nread, &buf[cnread..], &mut point);
}
}
point.position += self.offset;
self.reader.consume(nread);
self.point_count += 1;
Some(point)
}
}
#[cfg(test)]
mod tests {
use super::*;
fn points_from_file<P: AsRef<Path>>(path: P) -> Vec<Point> {
let iterator = PlyIterator::from_file(path).unwrap();
let mut points = Vec::new();
iterator.for_each(|p| {
points.push(p);
});
points
}
#[test]
fn test_xyz_f32_rgb_u8_le() {
let points = points_from_file("src/test_data/xyz_f32_rgb_u8_le.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert_eq!(points[7].position.x, 22.);
assert_eq!(points[0].color.red, 255);
assert_eq!(points[7].color.red, 234);
}
#[test]
fn test_xyz_f32_rgba_u8_le() {
let points = points_from_file("src/test_data/xyz_f32_rgba_u8_le.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert_eq!(points[7].position.x, 22.);
assert_eq!(points[0].color.red, 255);
assert_eq!(points[7].color.red, 227);
}
#[test]
fn test_xyz_f32_rgb_u8_intensity_f32_le() {
// All intensities in this file are NaN, but set.
let points = points_from_file("src/test_data/xyz_f32_rgb_u8_intensity_f32.ply");
assert_eq!(8, points.len());
assert_eq!(points[0].position.x, 1.);
assert!(points[0].intensity.is_some());
assert_eq!(points[7].position | {
if current_element.is_none() {
return Err(
InvalidInput(format!("property outside of element: {}", line)).into(),
);
};
let property = match entries[1] {
"list" if entries.len() == 5 => {
// We do not support list properties.
continue;
}
data_type_str if entries.len() == 3 => {
let data_type = DataType::from_str(data_type_str)?;
ScalarProperty {
name: entries[2].to_string(),
data_type,
}
}
_ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
}; | conditional_block |
main.rs | let (border, (bw, bh)) = (self.border, self.button_dim);
if y >= border && y < border + bh && x >= border && (x - border) % (bw + border) < bw {
Some((x - border) / (bw + border))
} else {
None
}
}
pub fn button_bounds(&self, i: usize) -> (i32, i32, i32, i32) {
let (border, (bw, bh)) = (self.border, self.button_dim);
let left = border + i * (bw + border);
let right = left + bw;
let top = border;
let bottom = top + bh;
(left as i32, right as i32, top as i32, bottom as i32)
}
}
#[derive(Debug, Clone, Copy)]
pub struct Argb(pub u32);
static ARGB_FORMAT_MSG: &str =
"Argb must be specified by a '#' followed by exactly 3, 4, 6, or 8 digits";
impl FromStr for Argb {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
if!s.starts_with('#') ||!s[1..].chars().all(|c| c.is_ascii_hexdigit()) {
return Err(anyhow!(ARGB_FORMAT_MSG));
}
let s = &s[1..];
let dup = |s: &str| {
s.chars().fold(String::new(), |mut s, c| {
s.push(c);
s.push(c);
s
})
};
match s.len() {
8 => Ok(Argb(u32::from_str_radix(s, 16)?)),
6 => Ok(Argb(u32::from_str_radix(s, 16)? | 0xff000000)),
4 => Ok(Argb(u32::from_str_radix(&dup(s), 16)?)),
3 => Ok(Argb(u32::from_str_radix(&dup(s), 16)? | 0xff000000)),
_ => Err(anyhow!(ARGB_FORMAT_MSG)),
}
}
}
}
use conf::{Argb, Config};
use font::Font;
mod font {
use anyhow::{Context, Result};
use rusttype::{self, point, Font as rtFont, Point, PositionedGlyph, Scale};
#[derive(Debug)]
pub struct Font {
font: rtFont<'static>,
scale: Scale,
offset: Point<f32>,
}
#[derive(Debug)]
pub struct Glyphs<'f> {
glyphs: Vec<PositionedGlyph<'f>>,
pub width: f32,
pub height: f32,
}
impl Default for Font {
fn default() -> Self {
let font =
rtFont::try_from_bytes(include_bytes!("../SourceCodePro-Regular.otf") as &[u8])
.expect("Failed constructing a Font from bytes");
Font::new(font)
}
}
impl Font {
fn new(font: rtFont<'static>) -> Self {
let scale = Scale::uniform(40.0);
let v_metrics = font.v_metrics(scale);
let offset = point(0.0, v_metrics.ascent);
Font {
font,
scale,
offset,
}
}
pub fn load<P: AsRef<std::path::Path>>(name: &P) -> Result<Font> {
let bytes = std::fs::read(name)?;
let font = rtFont::try_from_vec(bytes).context("Failed loading the default font")?;
Ok(Self::new(font))
}
pub fn glyphs(&self, s: &str) -> Glyphs {
let glyphs: Vec<_> = self.font.layout(s, self.scale, self.offset).collect();
let width = glyphs
.last()
.map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
.unwrap_or(0.0);
Glyphs {
glyphs,
width,
height: self.scale.y,
}
}
}
impl<'f> Glyphs<'f> {
pub fn render(self, mut d: impl FnMut(usize, usize, u8)) {
let (width, height) = (self.width.ceil(), self.height.ceil());
self.glyphs
.iter()
.filter_map(|g| g.pixel_bounding_box().map(|bb| (g, bb)))
.for_each(|(g, bb)| {
g.draw(|x, y, v| {
let v = (v * 255.0).ceil() as u8;
let x = x as i32 + bb.min.x;
let y = y as i32 + bb.min.y;
if x >= 0 && x < width as i32 && y >= 0 && y < height as i32 {
d(x as usize, y as usize, v);
}
})
})
}
}
}
#[derive(Debug)]
struct Registry {
compositor: Main<WlCompositor>,
seat: Main<WlSeat>,
shm: Main<WlShm>,
wmbase: Main<XdgWmBase>,
layer_shell: Main<LayerShell>,
}
#[derive(Debug, Default)]
struct Pointer {
pos: Option<(f64, f64)>,
pos_prev: Option<(f64, f64)>,
btn: Option<wl_pointer::ButtonState>,
btn_prev: Option<wl_pointer::ButtonState>,
frame: bool,
}
#[derive(Debug)]
struct Surface {
wl: Main<WlSurface>,
layer: Main<LayerSurface>,
committed: bool,
configured: bool,
}
#[derive(Debug)]
struct Data {
cfg: Config,
registry: Registry,
ptr: Pointer,
seat_cap: wl_seat::Capability,
shm_formats: Vec<wl_shm::Format>,
buffer: ShmPixelBuffer,
surface: Surface,
rendered: bool,
}
impl Data {
fn new(cfg: Config, mut registry: Registry) -> Data {
let seat = &mut registry.seat;
filter!(seat, data,
wl_seat::Event::Capabilities{capabilities} => data.seat_cap = capabilities
);
let pointer = seat.get_pointer();
filter!(pointer, data,
wl_pointer::Event::Enter { surface_x, surface_y,.. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Leave {.. } => {
data.ptr.pos.take();
data.ptr.btn.take();
},
wl_pointer::Event::Motion { surface_x, surface_y,.. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Button { button: 0x110, state,.. } => {
// 0x110 is BUTTON1
data.ptr.btn.replace(state);
},
wl_pointer::Event::Frame => {
data.ptr.frame = true;
}
);
let wmbase = &mut registry.wmbase;
filter!(wmbase, data,
xdg_wm_base::Event::Ping { serial } => data.registry.wmbase.detach().pong(serial)
);
let shm = &mut registry.shm;
filter!(shm, data,
wl_shm::Event::Format { format } => data.shm_formats.push(format)
);
let (width, height) = cfg.buttons_bounds();
let shmbuffer = create_shmbuffer(width, height, shm).expect("failed to create shm");
let (width, height) = cfg.buttons_bounds();
let surface =
Data::create_surface(width, height, ®istry.compositor, ®istry.layer_shell);
let mut data = Data {
cfg,
registry,
ptr: Pointer::default(),
buffer: shmbuffer,
surface: surface,
seat_cap: wl_seat::Capability::from_raw(0).unwrap(),
shm_formats: vec![],
rendered: false,
};
data.render();
data
}
fn create_surface(
width: usize,
height: usize,
compositor: &Main<WlCompositor>,
layer_shell: &Main<LayerShell>,
) -> Surface {
let wl = compositor.create_surface();
let (width, height) = (width as i32, height as i32);
let namespace = String::from("wtmenu");
let layer = layer_shell.get_layer_surface(&wl.detach(), None, Layer::Overlay, namespace);
layer.set_size(width as u32, height as u32);
filter!(layer, data,
layer_surface::Event::Configure { serial,.. } => {
data.surface.layer.detach().ack_configure(serial);
data.surface.configured = true;
},
layer_surface::Event::Closed => {
data.cfg.should_close = true;
}
);
wl.commit();
Surface {
wl,
layer,
committed: false,
configured: false,
}
}
fn render(&mut self) | } else {
self.cfg.nb
};
} else {
shm[(i, j)] = (self.cfg.nb & 0xffffff) | 0x22000000;
}
}
}
let scale = |v: u8, s: u8| ((v as u32 * s as u32) / 255) as u8;
let (nf, sf) = (self.cfg.nf, self.cfg.sf);
let rendered = self.rendered;
for i in 0..self.cfg.options.len() {
let opt = self.cfg.options.get(i).unwrap();
let g = self.cfg.font.glyphs(opt);
let (left, right, top, bottom) = self.cfg.button_bounds(i);
let trans_x: i32 = max(left, left - (g.width.ceil() as i32 - bw as i32) / 2);
let trans_y: i32 = max(top, top - (g.height.ceil() as i32 - bh as i32) / 2);
let (mut warn_btn, mut warn_buf) = (false, false);
g.render(|x, y, v| {
let (x, y) = (x as i32 + trans_x, y as i32 + trans_y);
if x < 0 || x as usize >= shm.width || y < 0 || y as usize >= shm.height {
if!rendered &&!warn_buf {
eprintln!(
"glyph for {:?} exceeds buffer boundaries: {:?} {:?}",
opt,
(x, y),
(shm.width, shm.height)
);
warn_buf = true;
}
return;
}
if x < left || x >= right || y < top || y >= bottom {
if!rendered &&!warn_btn {
eprintln!(
"glyph for {:?} exceeds button boundaries: {:?} {:?}",
opt,
(x, y),
(left, right, top, bottom)
);
warn_btn = true;
}
return;
}
let pixi = (x as usize, y as usize);
let [a, rb, gb, bb] = shm[pixi].to_be_bytes();
let [_, rf, gf, bf] = if Some(i) == focus {
sf.to_be_bytes()
} else {
nf.to_be_bytes()
};
shm[pixi] = u32::from_be_bytes([
a,
max(rb, scale(v, rf)),
max(gb, scale(v, gf)),
max(bb, scale(v, bf)),
]);
});
}
let (ww, wh) = self.cfg.buttons_bounds();
self.surface.wl.damage(0, 0, ww as i32, wh as i32);
self.surface.committed = false;
self.rendered = true;
}
}
mod pixbuf {
use super::Data;
use anyhow::{Context, Result};
use wayland_client::protocol::{
wl_buffer::{self, WlBuffer},
wl_shm::{self, WlShm},
};
use wayland_client::{Filter, Main};
#[derive(Debug)]
pub struct ShmPixelBuffer {
pub wl: Main<WlBuffer>,
pub locked: bool,
pub width: usize,
pub height: usize,
addr: *mut u32,
}
impl std::ops::Index<(usize, usize)> for ShmPixelBuffer {
type Output = u32;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
if x >= self.width || y >= self.height {
panic!(
"index ({}, {}) out of bounds (0..{}, 0..{})",
x, y, self.width, self.height
);
}
unsafe {
self.addr
.offset((x + y * self.width) as isize)
.as_ref()
.unwrap()
}
}
}
impl std::ops::IndexMut<(usize, usize)> for ShmPixelBuffer {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
if x >= self.width || y >= self.height {
panic!(
"index ({}, {}) out of bounds (0..{}, 0..{})",
x, y, self.width, self.height
);
}
unsafe {
self.addr
.offset((x + y * self.width) as isize)
.as_mut()
.unwrap()
}
}
}
pub fn create_shmbuffer(
width: usize,
height: usize,
shm: &Main<WlShm>,
) -> Result<ShmPixelBuffer> {
let fd = nix::unistd::mkstemp("/dev/shm/shmbuf_XXXXXX")
.and_then(|(fd, path)| nix::unistd::unlink(path.as_path()).and(Ok(fd)))
.context("Failed to create temp file fd for shm")?;
let (format, pixel_size) = (wl_shm::Format::Argb8888, 4);
let stride: i32 = width as i32 * pixel_size;
let size: usize = stride as usize * height;
nix::unistd::ftruncate(fd, size as i64).context("Failed calling ftruncate")?;
let shmdata: *mut u32 = unsafe {
let data = libc::mmap(
std::ptr::null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
0,
);
// checking for null is not in the manpage example, can you mmap 0x0?
if data == libc::MAP_FAILED || data.is_null() {
libc::close(fd);
panic!("map failed");
}
data as *mut u32
};
let pool = shm.create_pool(fd, size as i32);
let buffer = pool.create_buffer(0, width as i32, height as i32, stride, format);
pool.destroy();
filter!(buffer, data,
wl_buffer::Event::Release => {
data.buffer.locked = false;
}
);
Ok(ShmPixelBuffer {
wl: buffer,
locked: false,
addr: shmdata,
width: width,
height: height,
})
}
}
use pixbuf::{create_shmbuffer, ShmPixelBuffer};
fn init_registry(display: &Display, event_queue: &mut EventQueue) -> Result<Registry> {
let disp_proxy = display.attach(event_queue.token());
let gm = GlobalManager::new(&disp_proxy);
event_queue.dispatch(&mut (), |_, _, _| {})?;
let compositor: Main<WlCompositor> = gm
.instantiate_exact(4)
.context("Failed to get compositor handle")?;
let seat: Main<WlSeat> = gm
.instantiate_exact(5)
.context("Failed to get seat handle")?;
let wmbase: Main<XdgWmBase> = gm
| {
if self.buffer.locked {
return;
}
let shm = &mut self.buffer;
let (bw, bh) = self.cfg.button_dim;
let focus = {
let cfg = &self.cfg;
(self.ptr.btn)
.filter(|s| s == &wl_pointer::ButtonState::Pressed)
.and(self.ptr.pos)
.and_then(|(x, y)| cfg.in_button(x.ceil() as usize, y.ceil() as usize))
};
for i in 0..shm.width {
for j in 0..shm.height {
if let Some(opti) = self.cfg.in_button(i, j) {
shm[(i, j)] = if Some(opti) == focus {
self.cfg.sb | identifier_body |
main.rs | let (border, (bw, bh)) = (self.border, self.button_dim);
if y >= border && y < border + bh && x >= border && (x - border) % (bw + border) < bw {
Some((x - border) / (bw + border))
} else {
None
}
}
pub fn button_bounds(&self, i: usize) -> (i32, i32, i32, i32) {
let (border, (bw, bh)) = (self.border, self.button_dim);
let left = border + i * (bw + border);
let right = left + bw;
let top = border;
let bottom = top + bh;
(left as i32, right as i32, top as i32, bottom as i32)
}
}
#[derive(Debug, Clone, Copy)]
pub struct Argb(pub u32);
static ARGB_FORMAT_MSG: &str =
"Argb must be specified by a '#' followed by exactly 3, 4, 6, or 8 digits";
impl FromStr for Argb {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
if!s.starts_with('#') ||!s[1..].chars().all(|c| c.is_ascii_hexdigit()) {
return Err(anyhow!(ARGB_FORMAT_MSG));
}
let s = &s[1..];
let dup = |s: &str| {
s.chars().fold(String::new(), |mut s, c| {
s.push(c);
s.push(c);
s
})
};
match s.len() {
8 => Ok(Argb(u32::from_str_radix(s, 16)?)),
6 => Ok(Argb(u32::from_str_radix(s, 16)? | 0xff000000)),
4 => Ok(Argb(u32::from_str_radix(&dup(s), 16)?)),
3 => Ok(Argb(u32::from_str_radix(&dup(s), 16)? | 0xff000000)),
_ => Err(anyhow!(ARGB_FORMAT_MSG)),
}
}
}
}
use conf::{Argb, Config};
use font::Font;
mod font {
use anyhow::{Context, Result};
use rusttype::{self, point, Font as rtFont, Point, PositionedGlyph, Scale};
#[derive(Debug)]
pub struct Font {
font: rtFont<'static>,
scale: Scale,
offset: Point<f32>,
}
#[derive(Debug)]
pub struct Glyphs<'f> {
glyphs: Vec<PositionedGlyph<'f>>,
pub width: f32,
pub height: f32,
}
impl Default for Font {
fn default() -> Self {
let font =
rtFont::try_from_bytes(include_bytes!("../SourceCodePro-Regular.otf") as &[u8])
.expect("Failed constructing a Font from bytes");
Font::new(font)
}
}
impl Font {
fn new(font: rtFont<'static>) -> Self {
let scale = Scale::uniform(40.0);
let v_metrics = font.v_metrics(scale);
let offset = point(0.0, v_metrics.ascent);
Font {
font,
scale,
offset,
}
}
pub fn load<P: AsRef<std::path::Path>>(name: &P) -> Result<Font> {
let bytes = std::fs::read(name)?;
let font = rtFont::try_from_vec(bytes).context("Failed loading the default font")?;
Ok(Self::new(font))
}
pub fn glyphs(&self, s: &str) -> Glyphs {
let glyphs: Vec<_> = self.font.layout(s, self.scale, self.offset).collect();
let width = glyphs
.last()
.map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
.unwrap_or(0.0);
Glyphs {
glyphs,
width,
height: self.scale.y,
}
}
}
impl<'f> Glyphs<'f> {
pub fn render(self, mut d: impl FnMut(usize, usize, u8)) {
let (width, height) = (self.width.ceil(), self.height.ceil());
self.glyphs
.iter()
.filter_map(|g| g.pixel_bounding_box().map(|bb| (g, bb)))
.for_each(|(g, bb)| {
g.draw(|x, y, v| {
let v = (v * 255.0).ceil() as u8;
let x = x as i32 + bb.min.x;
let y = y as i32 + bb.min.y;
if x >= 0 && x < width as i32 && y >= 0 && y < height as i32 {
d(x as usize, y as usize, v);
}
})
})
}
}
}
#[derive(Debug)]
struct Registry {
compositor: Main<WlCompositor>,
seat: Main<WlSeat>,
shm: Main<WlShm>,
wmbase: Main<XdgWmBase>,
layer_shell: Main<LayerShell>,
}
#[derive(Debug, Default)]
struct Pointer {
pos: Option<(f64, f64)>,
pos_prev: Option<(f64, f64)>,
btn: Option<wl_pointer::ButtonState>,
btn_prev: Option<wl_pointer::ButtonState>,
frame: bool,
}
#[derive(Debug)]
struct Surface {
wl: Main<WlSurface>,
layer: Main<LayerSurface>,
committed: bool,
configured: bool,
}
#[derive(Debug)]
struct Data {
cfg: Config,
registry: Registry,
ptr: Pointer,
seat_cap: wl_seat::Capability,
shm_formats: Vec<wl_shm::Format>,
buffer: ShmPixelBuffer,
surface: Surface,
rendered: bool,
}
impl Data {
fn new(cfg: Config, mut registry: Registry) -> Data {
let seat = &mut registry.seat;
filter!(seat, data,
wl_seat::Event::Capabilities{capabilities} => data.seat_cap = capabilities
);
let pointer = seat.get_pointer();
filter!(pointer, data,
wl_pointer::Event::Enter { surface_x, surface_y,.. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Leave {.. } => {
data.ptr.pos.take();
data.ptr.btn.take();
},
wl_pointer::Event::Motion { surface_x, surface_y,.. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Button { button: 0x110, state,.. } => {
// 0x110 is BUTTON1
data.ptr.btn.replace(state);
},
wl_pointer::Event::Frame => {
data.ptr.frame = true;
}
);
let wmbase = &mut registry.wmbase;
filter!(wmbase, data,
xdg_wm_base::Event::Ping { serial } => data.registry.wmbase.detach().pong(serial)
);
let shm = &mut registry.shm;
filter!(shm, data,
wl_shm::Event::Format { format } => data.shm_formats.push(format)
);
let (width, height) = cfg.buttons_bounds();
let shmbuffer = create_shmbuffer(width, height, shm).expect("failed to create shm");
let (width, height) = cfg.buttons_bounds();
let surface =
Data::create_surface(width, height, ®istry.compositor, ®istry.layer_shell);
let mut data = Data {
cfg,
registry,
ptr: Pointer::default(),
buffer: shmbuffer,
surface: surface,
seat_cap: wl_seat::Capability::from_raw(0).unwrap(),
shm_formats: vec![],
rendered: false,
};
data.render();
data
}
fn create_surface(
width: usize,
height: usize,
compositor: &Main<WlCompositor>,
layer_shell: &Main<LayerShell>,
) -> Surface {
let wl = compositor.create_surface();
let (width, height) = (width as i32, height as i32);
let namespace = String::from("wtmenu");
let layer = layer_shell.get_layer_surface(&wl.detach(), None, Layer::Overlay, namespace);
layer.set_size(width as u32, height as u32);
filter!(layer, data,
layer_surface::Event::Configure { serial,.. } => {
data.surface.layer.detach().ack_configure(serial);
data.surface.configured = true;
},
layer_surface::Event::Closed => {
data.cfg.should_close = true;
}
);
wl.commit(); |
Surface {
wl,
layer,
committed: false,
configured: false,
}
}
fn render(&mut self) {
if self.buffer.locked {
return;
}
let shm = &mut self.buffer;
let (bw, bh) = self.cfg.button_dim;
let focus = {
let cfg = &self.cfg;
(self.ptr.btn)
.filter(|s| s == &wl_pointer::ButtonState::Pressed)
.and(self.ptr.pos)
.and_then(|(x, y)| cfg.in_button(x.ceil() as usize, y.ceil() as usize))
};
for i in 0..shm.width {
for j in 0..shm.height {
if let Some(opti) = self.cfg.in_button(i, j) {
shm[(i, j)] = if Some(opti) == focus {
self.cfg.sb
} else {
self.cfg.nb
};
} else {
shm[(i, j)] = (self.cfg.nb & 0xffffff) | 0x22000000;
}
}
}
let scale = |v: u8, s: u8| ((v as u32 * s as u32) / 255) as u8;
let (nf, sf) = (self.cfg.nf, self.cfg.sf);
let rendered = self.rendered;
for i in 0..self.cfg.options.len() {
let opt = self.cfg.options.get(i).unwrap();
let g = self.cfg.font.glyphs(opt);
let (left, right, top, bottom) = self.cfg.button_bounds(i);
let trans_x: i32 = max(left, left - (g.width.ceil() as i32 - bw as i32) / 2);
let trans_y: i32 = max(top, top - (g.height.ceil() as i32 - bh as i32) / 2);
let (mut warn_btn, mut warn_buf) = (false, false);
g.render(|x, y, v| {
let (x, y) = (x as i32 + trans_x, y as i32 + trans_y);
if x < 0 || x as usize >= shm.width || y < 0 || y as usize >= shm.height {
if!rendered &&!warn_buf {
eprintln!(
"glyph for {:?} exceeds buffer boundaries: {:?} {:?}",
opt,
(x, y),
(shm.width, shm.height)
);
warn_buf = true;
}
return;
}
if x < left || x >= right || y < top || y >= bottom {
if!rendered &&!warn_btn {
eprintln!(
"glyph for {:?} exceeds button boundaries: {:?} {:?}",
opt,
(x, y),
(left, right, top, bottom)
);
warn_btn = true;
}
return;
}
let pixi = (x as usize, y as usize);
let [a, rb, gb, bb] = shm[pixi].to_be_bytes();
let [_, rf, gf, bf] = if Some(i) == focus {
sf.to_be_bytes()
} else {
nf.to_be_bytes()
};
shm[pixi] = u32::from_be_bytes([
a,
max(rb, scale(v, rf)),
max(gb, scale(v, gf)),
max(bb, scale(v, bf)),
]);
});
}
let (ww, wh) = self.cfg.buttons_bounds();
self.surface.wl.damage(0, 0, ww as i32, wh as i32);
self.surface.committed = false;
self.rendered = true;
}
}
mod pixbuf {
use super::Data;
use anyhow::{Context, Result};
use wayland_client::protocol::{
wl_buffer::{self, WlBuffer},
wl_shm::{self, WlShm},
};
use wayland_client::{Filter, Main};
#[derive(Debug)]
pub struct ShmPixelBuffer {
pub wl: Main<WlBuffer>,
pub locked: bool,
pub width: usize,
pub height: usize,
addr: *mut u32,
}
impl std::ops::Index<(usize, usize)> for ShmPixelBuffer {
type Output = u32;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
if x >= self.width || y >= self.height {
panic!(
"index ({}, {}) out of bounds (0..{}, 0..{})",
x, y, self.width, self.height
);
}
unsafe {
self.addr
.offset((x + y * self.width) as isize)
.as_ref()
.unwrap()
}
}
}
impl std::ops::IndexMut<(usize, usize)> for ShmPixelBuffer {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
if x >= self.width || y >= self.height {
panic!(
"index ({}, {}) out of bounds (0..{}, 0..{})",
x, y, self.width, self.height
);
}
unsafe {
self.addr
.offset((x + y * self.width) as isize)
.as_mut()
.unwrap()
}
}
}
pub fn create_shmbuffer(
width: usize,
height: usize,
shm: &Main<WlShm>,
) -> Result<ShmPixelBuffer> {
let fd = nix::unistd::mkstemp("/dev/shm/shmbuf_XXXXXX")
.and_then(|(fd, path)| nix::unistd::unlink(path.as_path()).and(Ok(fd)))
.context("Failed to create temp file fd for shm")?;
let (format, pixel_size) = (wl_shm::Format::Argb8888, 4);
let stride: i32 = width as i32 * pixel_size;
let size: usize = stride as usize * height;
nix::unistd::ftruncate(fd, size as i64).context("Failed calling ftruncate")?;
let shmdata: *mut u32 = unsafe {
let data = libc::mmap(
std::ptr::null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
0,
);
// checking for null is not in the manpage example, can you mmap 0x0?
if data == libc::MAP_FAILED || data.is_null() {
libc::close(fd);
panic!("map failed");
}
data as *mut u32
};
let pool = shm.create_pool(fd, size as i32);
let buffer = pool.create_buffer(0, width as i32, height as i32, stride, format);
pool.destroy();
filter!(buffer, data,
wl_buffer::Event::Release => {
data.buffer.locked = false;
}
);
Ok(ShmPixelBuffer {
wl: buffer,
locked: false,
addr: shmdata,
width: width,
height: height,
})
}
}
use pixbuf::{create_shmbuffer, ShmPixelBuffer};
fn init_registry(display: &Display, event_queue: &mut EventQueue) -> Result<Registry> {
let disp_proxy = display.attach(event_queue.token());
let gm = GlobalManager::new(&disp_proxy);
event_queue.dispatch(&mut (), |_, _, _| {})?;
let compositor: Main<WlCompositor> = gm
.instantiate_exact(4)
.context("Failed to get compositor handle")?;
let seat: Main<WlSeat> = gm
.instantiate_exact(5)
.context("Failed to get seat handle")?;
let wmbase: Main<XdgWmBase> = gm
| random_line_split |
|
main.rs | let (border, (bw, bh)) = (self.border, self.button_dim);
if y >= border && y < border + bh && x >= border && (x - border) % (bw + border) < bw {
Some((x - border) / (bw + border))
} else {
None
}
}
pub fn button_bounds(&self, i: usize) -> (i32, i32, i32, i32) {
let (border, (bw, bh)) = (self.border, self.button_dim);
let left = border + i * (bw + border);
let right = left + bw;
let top = border;
let bottom = top + bh;
(left as i32, right as i32, top as i32, bottom as i32)
}
}
#[derive(Debug, Clone, Copy)]
pub struct | (pub u32);
static ARGB_FORMAT_MSG: &str =
"Argb must be specified by a '#' followed by exactly 3, 4, 6, or 8 digits";
impl FromStr for Argb {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
if!s.starts_with('#') ||!s[1..].chars().all(|c| c.is_ascii_hexdigit()) {
return Err(anyhow!(ARGB_FORMAT_MSG));
}
let s = &s[1..];
let dup = |s: &str| {
s.chars().fold(String::new(), |mut s, c| {
s.push(c);
s.push(c);
s
})
};
match s.len() {
8 => Ok(Argb(u32::from_str_radix(s, 16)?)),
6 => Ok(Argb(u32::from_str_radix(s, 16)? | 0xff000000)),
4 => Ok(Argb(u32::from_str_radix(&dup(s), 16)?)),
3 => Ok(Argb(u32::from_str_radix(&dup(s), 16)? | 0xff000000)),
_ => Err(anyhow!(ARGB_FORMAT_MSG)),
}
}
}
}
use conf::{Argb, Config};
use font::Font;
mod font {
use anyhow::{Context, Result};
use rusttype::{self, point, Font as rtFont, Point, PositionedGlyph, Scale};
#[derive(Debug)]
pub struct Font {
font: rtFont<'static>,
scale: Scale,
offset: Point<f32>,
}
#[derive(Debug)]
pub struct Glyphs<'f> {
glyphs: Vec<PositionedGlyph<'f>>,
pub width: f32,
pub height: f32,
}
impl Default for Font {
fn default() -> Self {
let font =
rtFont::try_from_bytes(include_bytes!("../SourceCodePro-Regular.otf") as &[u8])
.expect("Failed constructing a Font from bytes");
Font::new(font)
}
}
impl Font {
fn new(font: rtFont<'static>) -> Self {
let scale = Scale::uniform(40.0);
let v_metrics = font.v_metrics(scale);
let offset = point(0.0, v_metrics.ascent);
Font {
font,
scale,
offset,
}
}
pub fn load<P: AsRef<std::path::Path>>(name: &P) -> Result<Font> {
let bytes = std::fs::read(name)?;
let font = rtFont::try_from_vec(bytes).context("Failed loading the default font")?;
Ok(Self::new(font))
}
pub fn glyphs(&self, s: &str) -> Glyphs {
let glyphs: Vec<_> = self.font.layout(s, self.scale, self.offset).collect();
let width = glyphs
.last()
.map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
.unwrap_or(0.0);
Glyphs {
glyphs,
width,
height: self.scale.y,
}
}
}
impl<'f> Glyphs<'f> {
pub fn render(self, mut d: impl FnMut(usize, usize, u8)) {
let (width, height) = (self.width.ceil(), self.height.ceil());
self.glyphs
.iter()
.filter_map(|g| g.pixel_bounding_box().map(|bb| (g, bb)))
.for_each(|(g, bb)| {
g.draw(|x, y, v| {
let v = (v * 255.0).ceil() as u8;
let x = x as i32 + bb.min.x;
let y = y as i32 + bb.min.y;
if x >= 0 && x < width as i32 && y >= 0 && y < height as i32 {
d(x as usize, y as usize, v);
}
})
})
}
}
}
#[derive(Debug)]
struct Registry {
compositor: Main<WlCompositor>,
seat: Main<WlSeat>,
shm: Main<WlShm>,
wmbase: Main<XdgWmBase>,
layer_shell: Main<LayerShell>,
}
#[derive(Debug, Default)]
struct Pointer {
pos: Option<(f64, f64)>,
pos_prev: Option<(f64, f64)>,
btn: Option<wl_pointer::ButtonState>,
btn_prev: Option<wl_pointer::ButtonState>,
frame: bool,
}
#[derive(Debug)]
struct Surface {
wl: Main<WlSurface>,
layer: Main<LayerSurface>,
committed: bool,
configured: bool,
}
#[derive(Debug)]
struct Data {
cfg: Config,
registry: Registry,
ptr: Pointer,
seat_cap: wl_seat::Capability,
shm_formats: Vec<wl_shm::Format>,
buffer: ShmPixelBuffer,
surface: Surface,
rendered: bool,
}
impl Data {
fn new(cfg: Config, mut registry: Registry) -> Data {
let seat = &mut registry.seat;
filter!(seat, data,
wl_seat::Event::Capabilities{capabilities} => data.seat_cap = capabilities
);
let pointer = seat.get_pointer();
filter!(pointer, data,
wl_pointer::Event::Enter { surface_x, surface_y,.. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Leave {.. } => {
data.ptr.pos.take();
data.ptr.btn.take();
},
wl_pointer::Event::Motion { surface_x, surface_y,.. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Button { button: 0x110, state,.. } => {
// 0x110 is BUTTON1
data.ptr.btn.replace(state);
},
wl_pointer::Event::Frame => {
data.ptr.frame = true;
}
);
let wmbase = &mut registry.wmbase;
filter!(wmbase, data,
xdg_wm_base::Event::Ping { serial } => data.registry.wmbase.detach().pong(serial)
);
let shm = &mut registry.shm;
filter!(shm, data,
wl_shm::Event::Format { format } => data.shm_formats.push(format)
);
let (width, height) = cfg.buttons_bounds();
let shmbuffer = create_shmbuffer(width, height, shm).expect("failed to create shm");
let (width, height) = cfg.buttons_bounds();
let surface =
Data::create_surface(width, height, ®istry.compositor, ®istry.layer_shell);
let mut data = Data {
cfg,
registry,
ptr: Pointer::default(),
buffer: shmbuffer,
surface: surface,
seat_cap: wl_seat::Capability::from_raw(0).unwrap(),
shm_formats: vec![],
rendered: false,
};
data.render();
data
}
fn create_surface(
width: usize,
height: usize,
compositor: &Main<WlCompositor>,
layer_shell: &Main<LayerShell>,
) -> Surface {
let wl = compositor.create_surface();
let (width, height) = (width as i32, height as i32);
let namespace = String::from("wtmenu");
let layer = layer_shell.get_layer_surface(&wl.detach(), None, Layer::Overlay, namespace);
layer.set_size(width as u32, height as u32);
filter!(layer, data,
layer_surface::Event::Configure { serial,.. } => {
data.surface.layer.detach().ack_configure(serial);
data.surface.configured = true;
},
layer_surface::Event::Closed => {
data.cfg.should_close = true;
}
);
wl.commit();
Surface {
wl,
layer,
committed: false,
configured: false,
}
}
fn render(&mut self) {
if self.buffer.locked {
return;
}
let shm = &mut self.buffer;
let (bw, bh) = self.cfg.button_dim;
let focus = {
let cfg = &self.cfg;
(self.ptr.btn)
.filter(|s| s == &wl_pointer::ButtonState::Pressed)
.and(self.ptr.pos)
.and_then(|(x, y)| cfg.in_button(x.ceil() as usize, y.ceil() as usize))
};
for i in 0..shm.width {
for j in 0..shm.height {
if let Some(opti) = self.cfg.in_button(i, j) {
shm[(i, j)] = if Some(opti) == focus {
self.cfg.sb
} else {
self.cfg.nb
};
} else {
shm[(i, j)] = (self.cfg.nb & 0xffffff) | 0x22000000;
}
}
}
let scale = |v: u8, s: u8| ((v as u32 * s as u32) / 255) as u8;
let (nf, sf) = (self.cfg.nf, self.cfg.sf);
let rendered = self.rendered;
for i in 0..self.cfg.options.len() {
let opt = self.cfg.options.get(i).unwrap();
let g = self.cfg.font.glyphs(opt);
let (left, right, top, bottom) = self.cfg.button_bounds(i);
let trans_x: i32 = max(left, left - (g.width.ceil() as i32 - bw as i32) / 2);
let trans_y: i32 = max(top, top - (g.height.ceil() as i32 - bh as i32) / 2);
let (mut warn_btn, mut warn_buf) = (false, false);
g.render(|x, y, v| {
let (x, y) = (x as i32 + trans_x, y as i32 + trans_y);
if x < 0 || x as usize >= shm.width || y < 0 || y as usize >= shm.height {
if!rendered &&!warn_buf {
eprintln!(
"glyph for {:?} exceeds buffer boundaries: {:?} {:?}",
opt,
(x, y),
(shm.width, shm.height)
);
warn_buf = true;
}
return;
}
if x < left || x >= right || y < top || y >= bottom {
if!rendered &&!warn_btn {
eprintln!(
"glyph for {:?} exceeds button boundaries: {:?} {:?}",
opt,
(x, y),
(left, right, top, bottom)
);
warn_btn = true;
}
return;
}
let pixi = (x as usize, y as usize);
let [a, rb, gb, bb] = shm[pixi].to_be_bytes();
let [_, rf, gf, bf] = if Some(i) == focus {
sf.to_be_bytes()
} else {
nf.to_be_bytes()
};
shm[pixi] = u32::from_be_bytes([
a,
max(rb, scale(v, rf)),
max(gb, scale(v, gf)),
max(bb, scale(v, bf)),
]);
});
}
let (ww, wh) = self.cfg.buttons_bounds();
self.surface.wl.damage(0, 0, ww as i32, wh as i32);
self.surface.committed = false;
self.rendered = true;
}
}
mod pixbuf {
use super::Data;
use anyhow::{Context, Result};
use wayland_client::protocol::{
wl_buffer::{self, WlBuffer},
wl_shm::{self, WlShm},
};
use wayland_client::{Filter, Main};
#[derive(Debug)]
pub struct ShmPixelBuffer {
pub wl: Main<WlBuffer>,
pub locked: bool,
pub width: usize,
pub height: usize,
addr: *mut u32,
}
impl std::ops::Index<(usize, usize)> for ShmPixelBuffer {
type Output = u32;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
if x >= self.width || y >= self.height {
panic!(
"index ({}, {}) out of bounds (0..{}, 0..{})",
x, y, self.width, self.height
);
}
unsafe {
self.addr
.offset((x + y * self.width) as isize)
.as_ref()
.unwrap()
}
}
}
impl std::ops::IndexMut<(usize, usize)> for ShmPixelBuffer {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
if x >= self.width || y >= self.height {
panic!(
"index ({}, {}) out of bounds (0..{}, 0..{})",
x, y, self.width, self.height
);
}
unsafe {
self.addr
.offset((x + y * self.width) as isize)
.as_mut()
.unwrap()
}
}
}
pub fn create_shmbuffer(
width: usize,
height: usize,
shm: &Main<WlShm>,
) -> Result<ShmPixelBuffer> {
let fd = nix::unistd::mkstemp("/dev/shm/shmbuf_XXXXXX")
.and_then(|(fd, path)| nix::unistd::unlink(path.as_path()).and(Ok(fd)))
.context("Failed to create temp file fd for shm")?;
let (format, pixel_size) = (wl_shm::Format::Argb8888, 4);
let stride: i32 = width as i32 * pixel_size;
let size: usize = stride as usize * height;
nix::unistd::ftruncate(fd, size as i64).context("Failed calling ftruncate")?;
let shmdata: *mut u32 = unsafe {
let data = libc::mmap(
std::ptr::null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd,
0,
);
// checking for null is not in the manpage example, can you mmap 0x0?
if data == libc::MAP_FAILED || data.is_null() {
libc::close(fd);
panic!("map failed");
}
data as *mut u32
};
let pool = shm.create_pool(fd, size as i32);
let buffer = pool.create_buffer(0, width as i32, height as i32, stride, format);
pool.destroy();
filter!(buffer, data,
wl_buffer::Event::Release => {
data.buffer.locked = false;
}
);
Ok(ShmPixelBuffer {
wl: buffer,
locked: false,
addr: shmdata,
width: width,
height: height,
})
}
}
use pixbuf::{create_shmbuffer, ShmPixelBuffer};
fn init_registry(display: &Display, event_queue: &mut EventQueue) -> Result<Registry> {
let disp_proxy = display.attach(event_queue.token());
let gm = GlobalManager::new(&disp_proxy);
event_queue.dispatch(&mut (), |_, _, _| {})?;
let compositor: Main<WlCompositor> = gm
.instantiate_exact(4)
.context("Failed to get compositor handle")?;
let seat: Main<WlSeat> = gm
.instantiate_exact(5)
.context("Failed to get seat handle")?;
let wmbase: Main<XdgWmBase> = gm
| Argb | identifier_name |
lib.rs | #![cfg_attr(feature = "nightly", feature(const_panic))]
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::parse_macro_input;
use syn::Item;
/// Generate B1, B2,.., B64 with implementation of the Specifier trait
#[proc_macro]
pub fn generate_bit_specifiers(_input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let mut output = TokenStream::new();
let bit_specifiers = (1usize..=64).map(|idx| {
let ident = syn::Ident::new(&format!("B{}", idx), proc_macro2::Span::call_site());
let size_type = size_to_type(idx);
quote! (
pub enum #ident {}
impl Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`,.., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn bit_ops_impl() -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self &!((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a Struct, validates field sizes,
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty);
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^
if attr.path.is_ident("bits") |
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Namef field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if!variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase(),
ident.to_string().to_lowercase()
),
ident.span(),
);
// Rely on the pattern:
// x if x == Enum::Variant as Enum::IntType
quote!( #unique_ident if #unique_ident == Self::#ident as Self::IntType => Self::#ident )
});
// Iterator of full variant name: `Enum::Variant`
let enum_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
quote!( #enum_ident::#ident )
});
// Formatted error message in case the discriminant of a variant is outside of the allowed bit range.
let error = format!(
"\nError in BitfieldSpecifier for {}.\nBitfieldSpecifier expects discriminants in the range 0..2^BITS.\nOutside of range:",
enum_ident.to_string()
);
quote!(
// Compile time checks on the size of the discriminant
#(
// Conditional consts and panic in const requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( (#enum_variants as usize) < #variant_count) {
0
}else{
panic!(concat!(#error, stringify!(#enum_variants), " >= ", #variant_count, "\n"))
};
)*
impl From<#enum_ident> for #size_type {
fn from(x: #enum_ident) -> #size_type {
x as #size_type
}
}
impl Specifier for #enum_ident {
const BITS: usize = #bits;
type IntType = #size_type;
type Interface = Self;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
match int_val {
#(#match_variants),*,
_ => panic!("Not supported"),
}
}
}
)
.into()
}
| {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
} | conditional_block |
lib.rs | #![cfg_attr(feature = "nightly", feature(const_panic))]
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::parse_macro_input;
use syn::Item;
/// Generate B1, B2,.., B64 with implementation of the Specifier trait
#[proc_macro]
pub fn generate_bit_specifiers(_input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let mut output = TokenStream::new();
let bit_specifiers = (1usize..=64).map(|idx| {
let ident = syn::Ident::new(&format!("B{}", idx), proc_macro2::Span::call_site());
let size_type = size_to_type(idx);
quote! (
pub enum #ident {}
impl Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`,.., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn bit_ops_impl() -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self &!((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a Struct, validates field sizes,
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty); |
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^
if attr.path.is_ident("bits") {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
}
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Namef field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if!variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase(),
ident.to_string().to_lowercase()
),
ident.span(),
);
// Rely on the pattern:
// x if x == Enum::Variant as Enum::IntType
quote!( #unique_ident if #unique_ident == Self::#ident as Self::IntType => Self::#ident )
});
// Iterator of full variant name: `Enum::Variant`
let enum_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
quote!( #enum_ident::#ident )
});
// Formatted error message in case the discriminant of a variant is outside of the allowed bit range.
let error = format!(
"\nError in BitfieldSpecifier for {}.\nBitfieldSpecifier expects discriminants in the range 0..2^BITS.\nOutside of range:",
enum_ident.to_string()
);
quote!(
// Compile time checks on the size of the discriminant
#(
// Conditional consts and panic in const requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( (#enum_variants as usize) < #variant_count) {
0
}else{
panic!(concat!(#error, stringify!(#enum_variants), " >= ", #variant_count, "\n"))
};
)*
impl From<#enum_ident> for #size_type {
fn from(x: #enum_ident) -> #size_type {
x as #size_type
}
}
impl Specifier for #enum_ident {
const BITS: usize = #bits;
type IntType = #size_type;
type Interface = Self;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
match int_val {
#(#match_variants),*,
_ => panic!("Not supported"),
}
}
}
)
.into()
} | random_line_split |
|
lib.rs | #![cfg_attr(feature = "nightly", feature(const_panic))]
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::parse_macro_input;
use syn::Item;
/// Generate B1, B2,.., B64 with implementation of the Specifier trait
#[proc_macro]
pub fn generate_bit_specifiers(_input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let mut output = TokenStream::new();
let bit_specifiers = (1usize..=64).map(|idx| {
let ident = syn::Ident::new(&format!("B{}", idx), proc_macro2::Span::call_site());
let size_type = size_to_type(idx);
quote! (
pub enum #ident {}
impl Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`,.., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn | () -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self &!((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a Struct, validates field sizes,
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty);
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^
if attr.path.is_ident("bits") {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
}
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Namef field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if!variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase(),
ident.to_string().to_lowercase()
),
ident.span(),
);
// Rely on the pattern:
// x if x == Enum::Variant as Enum::IntType
quote!( #unique_ident if #unique_ident == Self::#ident as Self::IntType => Self::#ident )
});
// Iterator of full variant name: `Enum::Variant`
let enum_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
quote!( #enum_ident::#ident )
});
// Formatted error message in case the discriminant of a variant is outside of the allowed bit range.
let error = format!(
"\nError in BitfieldSpecifier for {}.\nBitfieldSpecifier expects discriminants in the range 0..2^BITS.\nOutside of range:",
enum_ident.to_string()
);
quote!(
// Compile time checks on the size of the discriminant
#(
// Conditional consts and panic in const requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( (#enum_variants as usize) < #variant_count) {
0
}else{
panic!(concat!(#error, stringify!(#enum_variants), " >= ", #variant_count, "\n"))
};
)*
impl From<#enum_ident> for #size_type {
fn from(x: #enum_ident) -> #size_type {
x as #size_type
}
}
impl Specifier for #enum_ident {
const BITS: usize = #bits;
type IntType = #size_type;
type Interface = Self;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
match int_val {
#(#match_variants),*,
_ => panic!("Not supported"),
}
}
}
)
.into()
}
| bit_ops_impl | identifier_name |
lib.rs | #![cfg_attr(feature = "nightly", feature(const_panic))]
extern crate proc_macro;
use proc_macro2::TokenStream;
use quote::{quote, quote_spanned};
use syn::parse_macro_input;
use syn::Item;
/// Generate B1, B2,.., B64 with implementation of the Specifier trait
#[proc_macro]
pub fn generate_bit_specifiers(_input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let mut output = TokenStream::new();
let bit_specifiers = (1usize..=64).map(|idx| {
let ident = syn::Ident::new(&format!("B{}", idx), proc_macro2::Span::call_site());
let size_type = size_to_type(idx);
quote! (
pub enum #ident {}
impl Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`,.., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn bit_ops_impl() -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self &!((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a Struct, validates field sizes,
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream | if attr.path.is_ident("bits") {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
}
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Namef field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if!variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase(),
ident.to_string().to_lowercase()
),
ident.span(),
);
// Rely on the pattern:
// x if x == Enum::Variant as Enum::IntType
quote!( #unique_ident if #unique_ident == Self::#ident as Self::IntType => Self::#ident )
});
// Iterator of full variant name: `Enum::Variant`
let enum_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
quote!( #enum_ident::#ident )
});
// Formatted error message in case the discriminant of a variant is outside of the allowed bit range.
let error = format!(
"\nError in BitfieldSpecifier for {}.\nBitfieldSpecifier expects discriminants in the range 0..2^BITS.\nOutside of range:",
enum_ident.to_string()
);
quote!(
// Compile time checks on the size of the discriminant
#(
// Conditional consts and panic in const requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( (#enum_variants as usize) < #variant_count) {
0
}else{
panic!(concat!(#error, stringify!(#enum_variants), " >= ", #variant_count, "\n"))
};
)*
impl From<#enum_ident> for #size_type {
fn from(x: #enum_ident) -> #size_type {
x as #size_type
}
}
impl Specifier for #enum_ident {
const BITS: usize = #bits;
type IntType = #size_type;
type Interface = Self;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
match int_val {
#(#match_variants),*,
_ => panic!("Not supported"),
}
}
}
)
.into()
}
| {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty);
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^ | identifier_body |
frontend.rs | //! General algorithms for frontends.
//!
//! The frontend is concerned with executing the abstract behaviours given by the backend in terms
//! of the actions of the frontend types. This means translating Redirect errors to the correct
//! Redirect http response for example or optionally sending internal errors to loggers.
//!
//! To ensure the adherence to the oauth2 rfc and the improve general implementations, some control
//! flow of incoming packets is specified here instead of the frontend implementations.
//! Instead, traits are offered to make this compatible with other frontends. In theory, this makes
//! the frontend pluggable which could improve testing.
use std::borrow::Cow;
use std::collections::HashMap;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::str::from_utf8;
use primitives::registrar::PreGrant;
use super::backend::{AccessTokenRequest, CodeRef, CodeRequest, CodeError, ErrorUrl, IssuerError, IssuerRef};
use super::backend::{AccessError, GuardRequest, GuardRef};
use url::Url;
use base64;
/// Holds the decode query fragments from the url
struct AuthorizationParameter<'a> {
valid: bool,
method: Option<Cow<'a, str>>,
client_id: Option<Cow<'a, str>>,
scope: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
state: Option<Cow<'a, str>>,
}
/// Answer from OwnerAuthorizer to indicate the owners choice. | pub enum Authentication {
Failed,
InProgress,
Authenticated(String),
}
struct AccessTokenParameter<'a> {
valid: bool,
client_id: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
grant_type: Option<Cow<'a, str>>,
code: Option<Cow<'a, str>>,
authorization: Option<(String, Vec<u8>)>,
}
struct GuardParameter<'a> {
valid: bool,
token: Option<Cow<'a, str>>,
}
/// Abstraction of web requests with several different abstractions and constructors needed by this
/// frontend. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error: From<OAuthError>;
type Response: WebResponse<Error=Self::Error>;
/// Retrieve a parsed version of the url query. An Err return value indicates a malformed query
/// or an otherwise malformed WebRequest. Note that an empty query should result in
/// `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<HashMap<String, Vec<String>>, ()>;
/// Retriev the parsed `application/x-form-urlencoded` body of the request. An Err value
/// indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<&HashMap<String, Vec<String>>, ()>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, ()>;
}
/// Response representation into which the Request is transformed by the code_grant types.
pub trait WebResponse where Self: Sized {
/// The error generated when trying to construct an unhandled or invalid response.
type Error: From<OAuthError>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(url: Url) -> Result<Self, Self::Error>;
/// A pure text response with no special media type set.
fn text(text: &str) -> Result<Self, Self::Error>;
/// Json repsonse data, with media type `aplication/json.
fn json(data: &str) -> Result<Self, Self::Error>;
/// Construct a redirect for the error. Here the response may choose to augment the error with
/// additional information (such as help websites, description strings), hence the default
/// implementation which does not do any of that.
fn redirect_error(target: ErrorUrl) -> Result<Self, Self::Error> {
Self::redirect(target.into())
}
/// Set the response status to 400
fn as_client_error(self) -> Result<Self, Self::Error>;
/// Set the response status to 401
fn as_unauthorized(self) -> Result<Self, Self::Error>;
/// Add an Authorization header
fn with_authorization(self, kind: &str) -> Result<Self, Self::Error>;
}
pub trait OwnerAuthorizer {
type Request: WebRequest;
fn get_owner_authorization(&self, &mut Self::Request, &PreGrant)
-> Result<(Authentication, <Self::Request as WebRequest>::Response), <Self::Request as WebRequest>::Error>;
}
pub struct AuthorizationFlow;
pub struct PreparedAuthorization<'l, Req> where
Req: WebRequest + 'l,
{
request: &'l mut Req,
urldecoded: AuthorizationParameter<'l>,
}
fn extract_parameters(params: HashMap<String, Vec<String>>) -> AuthorizationParameter<'static> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<&str, &str>>();
AuthorizationParameter{
valid: true,
method: map.get("response_type").map(|method| method.to_string().into()),
client_id: map.get("client_id").map(|client| client.to_string().into()),
scope: map.get("scope").map(|scope| scope.to_string().into()),
redirect_url: map.get("redirect_url").map(|url| url.to_string().into()),
state: map.get("state").map(|state| state.to_string().into()),
}
}
impl<'s> CodeRequest for AuthorizationParameter<'s> {
fn valid(&self) -> bool { self.valid }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.as_ref().map(|c| c.as_ref().into()) }
fn scope(&self) -> Option<Cow<str>> { self.scope.as_ref().map(|c| c.as_ref().into()) }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.as_ref().map(|c| c.as_ref().into()) }
fn state(&self) -> Option<Cow<str>> { self.state.as_ref().map(|c| c.as_ref().into()) }
fn method(&self) -> Option<Cow<str>> { self.method.as_ref().map(|c| c.as_ref().into()) }
}
impl<'s> AuthorizationParameter<'s> {
fn invalid() -> Self {
AuthorizationParameter { valid: false, method: None, client_id: None, scope: None,
redirect_url: None, state: None }
}
}
impl AuthorizationFlow {
/// Idempotent data processing, checks formats.
pub fn prepare<W: WebRequest>(incoming: &mut W) -> Result<PreparedAuthorization<W>, W::Error> {
let urldecoded = incoming.query()
.map(extract_parameters)
.unwrap_or_else(|_| AuthorizationParameter::invalid());
Ok(PreparedAuthorization{request: incoming, urldecoded})
}
pub fn handle<'c, Req>(granter: CodeRef<'c>, prepared: PreparedAuthorization<'c, Req>, page_handler: &OwnerAuthorizer<Request=Req>)
-> Result<Req::Response, Req::Error> where
Req: WebRequest,
{
let PreparedAuthorization { request: req, urldecoded } = prepared;
let negotiated = match granter.negotiate(&urldecoded) {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
let authorization = match page_handler.get_owner_authorization(req, negotiated.pre_grant())? {
(Authentication::Failed, _)
=> negotiated.deny(),
(Authentication::InProgress, response)
=> return Ok(response),
(Authentication::Authenticated(owner), _)
=> negotiated.authorize(owner.into()),
};
let redirect_to = match authorization {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
Req::Response::redirect(redirect_to)
}
}
pub struct GrantFlow;
pub struct PreparedGrant<'l, Req> where
Req: WebRequest + 'l,
{
params: AccessTokenParameter<'l>,
req: PhantomData<Req>,
}
fn extract_access_token<'l>(params: &'l HashMap<String, Vec<String>>) -> AccessTokenParameter<'l> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<_, _>>();
AccessTokenParameter {
valid: true,
client_id: map.get("client_id").map(|v| (*v).into()),
code: map.get("code").map(|v| (*v).into()),
redirect_url: map.get("redirect_url").map(|v| (*v).into()),
grant_type: map.get("grant_type").map(|v| (*v).into()),
authorization: None,
}
}
impl<'l> AccessTokenRequest for AccessTokenParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn code(&self) -> Option<Cow<str>> { self.code.clone() }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.clone() }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.clone() }
fn grant_type(&self) -> Option<Cow<str>> { self.grant_type.clone() }
fn authorization(&self) -> Option<(Cow<str>, Cow<[u8]>)> {
match self.authorization {
None => None,
Some((ref id, ref pass))
=> Some((id.as_str().into(), pass.as_slice().into())),
}
}
}
impl<'l> AccessTokenParameter<'l> {
fn invalid() -> Self {
AccessTokenParameter { valid: false, code: None, client_id: None, redirect_url: None,
grant_type: None, authorization: None }
}
}
impl GrantFlow {
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedGrant<W>, W::Error> {
let params = GrantFlow::create_valid_params(req)
.unwrap_or(AccessTokenParameter::invalid());
Ok(PreparedGrant { params: params, req: PhantomData })
}
fn create_valid_params<'a, W: WebRequest>(req: &'a mut W) -> Option<AccessTokenParameter<'a>> {
let authorization = match req.authheader() {
Err(_) => return None,
Ok(None) => None,
Ok(Some(ref header)) => {
if!header.starts_with("Basic ") {
return None
}
let combined = match base64::decode(&header[6..]) {
Err(_) => return None,
Ok(vec) => vec,
};
let mut split = combined.splitn(2, |&c| c == b':');
let client_bin = match split.next() {
None => return None,
Some(client) => client,
};
let passwd = match split.next() {
None => return None,
Some(passwd64) => passwd64,
};
let client = match from_utf8(client_bin) {
Err(_) => return None,
Ok(client) => client,
};
Some((client.to_string(), passwd.to_vec()))
},
};
let mut params = match req.urlbody() {
Err(_) => return None,
Ok(body) => extract_access_token(body),
};
params.authorization = authorization;
Some(params)
}
pub fn handle<Req>(mut issuer: IssuerRef, prepared: PreparedGrant<Req>)
-> Result<Req::Response, Req::Error> where Req: WebRequest
{
let PreparedGrant { params,.. } = prepared;
match issuer.use_code(¶ms) {
Err(IssuerError::Invalid(json_data))
=> return Req::Response::json(&json_data.to_json())?.as_client_error(),
Err(IssuerError::Unauthorized(json_data, scheme))
=> return Req::Response::json(&json_data.to_json())?.as_unauthorized()?.with_authorization(&scheme),
Ok(token) => Req::Response::json(&token.to_json()),
}
}
}
pub struct AccessFlow;
pub struct PreparedAccess<'l, Req> where
Req: WebRequest + 'l,
{
params: GuardParameter<'l>,
req: PhantomData<Req>,
}
impl<'l> GuardRequest for GuardParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn token(&self) -> Option<Cow<str>> { self.token.clone() }
}
impl<'l> GuardParameter<'l> {
fn invalid() -> Self {
GuardParameter { valid: false, token: None }
}
}
impl AccessFlow {
fn create_valid_params<W: WebRequest>(req: &mut W) -> Option<GuardParameter> {
let token = match req.authheader() {
Err(_) => return None,
Ok(None) => None,
Ok(Some(header)) => {
if!header.starts_with("Bearer ") {
return None
}
match header {
Cow::Borrowed(v) => Some(Cow::Borrowed(&v[7..])),
Cow::Owned(v) => Some(Cow::Owned(v[7..].to_string())),
}
}
};
Some(GuardParameter { valid: true, token })
}
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedAccess<W>, W::Error> {
let params = AccessFlow::create_valid_params(req)
.unwrap_or_else(|| GuardParameter::invalid());
Ok(PreparedAccess { params: params, req: PhantomData })
}
pub fn handle<Req>(guard: GuardRef, prepared: PreparedAccess<Req>)
-> Result<(), Req::Error> where Req: WebRequest {
guard.protect(&prepared.params).map_err(|err| {
match err {
AccessError::InvalidRequest => OAuthError::InternalAccessError(),
AccessError::AccessDenied => OAuthError::AccessDenied,
}.into()
})
}
}
/// Errors which should not or need not be communicated to the requesting party but which are of
/// interest to the server. See the documentation for each enum variant for more documentation on
/// each as some may have an expected response. These include badly formatted headers or url encoded
/// body, unexpected parameters, or security relevant required parameters.
#[derive(Debug)]
pub enum OAuthError {
InternalCodeError(),
InternalAccessError(),
AccessDenied,
}
impl fmt::Display for OAuthError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.write_str("OAuthError")
}
}
impl error::Error for OAuthError {
fn description(&self) -> &str {
"OAuthError"
}
} | #[derive(Clone)] | random_line_split |
frontend.rs | //! General algorithms for frontends.
//!
//! The frontend is concerned with executing the abstract behaviours given by the backend in terms
//! of the actions of the frontend types. This means translating Redirect errors to the correct
//! Redirect http response for example or optionally sending internal errors to loggers.
//!
//! To ensure the adherence to the oauth2 rfc and the improve general implementations, some control
//! flow of incoming packets is specified here instead of the frontend implementations.
//! Instead, traits are offered to make this compatible with other frontends. In theory, this makes
//! the frontend pluggable which could improve testing.
use std::borrow::Cow;
use std::collections::HashMap;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::str::from_utf8;
use primitives::registrar::PreGrant;
use super::backend::{AccessTokenRequest, CodeRef, CodeRequest, CodeError, ErrorUrl, IssuerError, IssuerRef};
use super::backend::{AccessError, GuardRequest, GuardRef};
use url::Url;
use base64;
/// Holds the decode query fragments from the url
struct AuthorizationParameter<'a> {
valid: bool,
method: Option<Cow<'a, str>>,
client_id: Option<Cow<'a, str>>,
scope: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
state: Option<Cow<'a, str>>,
}
/// Answer from OwnerAuthorizer to indicate the owners choice.
#[derive(Clone)]
pub enum Authentication {
Failed,
InProgress,
Authenticated(String),
}
struct AccessTokenParameter<'a> {
valid: bool,
client_id: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
grant_type: Option<Cow<'a, str>>,
code: Option<Cow<'a, str>>,
authorization: Option<(String, Vec<u8>)>,
}
struct GuardParameter<'a> {
valid: bool,
token: Option<Cow<'a, str>>,
}
/// Abstraction of web requests with several different abstractions and constructors needed by this
/// frontend. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error: From<OAuthError>;
type Response: WebResponse<Error=Self::Error>;
/// Retrieve a parsed version of the url query. An Err return value indicates a malformed query
/// or an otherwise malformed WebRequest. Note that an empty query should result in
/// `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<HashMap<String, Vec<String>>, ()>;
/// Retriev the parsed `application/x-form-urlencoded` body of the request. An Err value
/// indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<&HashMap<String, Vec<String>>, ()>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, ()>;
}
/// Response representation into which the Request is transformed by the code_grant types.
pub trait WebResponse where Self: Sized {
/// The error generated when trying to construct an unhandled or invalid response.
type Error: From<OAuthError>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(url: Url) -> Result<Self, Self::Error>;
/// A pure text response with no special media type set.
fn text(text: &str) -> Result<Self, Self::Error>;
/// Json repsonse data, with media type `aplication/json.
fn json(data: &str) -> Result<Self, Self::Error>;
/// Construct a redirect for the error. Here the response may choose to augment the error with
/// additional information (such as help websites, description strings), hence the default
/// implementation which does not do any of that.
fn redirect_error(target: ErrorUrl) -> Result<Self, Self::Error> {
Self::redirect(target.into())
}
/// Set the response status to 400
fn as_client_error(self) -> Result<Self, Self::Error>;
/// Set the response status to 401
fn as_unauthorized(self) -> Result<Self, Self::Error>;
/// Add an Authorization header
fn with_authorization(self, kind: &str) -> Result<Self, Self::Error>;
}
pub trait OwnerAuthorizer {
type Request: WebRequest;
fn get_owner_authorization(&self, &mut Self::Request, &PreGrant)
-> Result<(Authentication, <Self::Request as WebRequest>::Response), <Self::Request as WebRequest>::Error>;
}
pub struct AuthorizationFlow;
pub struct PreparedAuthorization<'l, Req> where
Req: WebRequest + 'l,
{
request: &'l mut Req,
urldecoded: AuthorizationParameter<'l>,
}
fn extract_parameters(params: HashMap<String, Vec<String>>) -> AuthorizationParameter<'static> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<&str, &str>>();
AuthorizationParameter{
valid: true,
method: map.get("response_type").map(|method| method.to_string().into()),
client_id: map.get("client_id").map(|client| client.to_string().into()),
scope: map.get("scope").map(|scope| scope.to_string().into()),
redirect_url: map.get("redirect_url").map(|url| url.to_string().into()),
state: map.get("state").map(|state| state.to_string().into()),
}
}
impl<'s> CodeRequest for AuthorizationParameter<'s> {
fn valid(&self) -> bool { self.valid }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.as_ref().map(|c| c.as_ref().into()) }
fn scope(&self) -> Option<Cow<str>> { self.scope.as_ref().map(|c| c.as_ref().into()) }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.as_ref().map(|c| c.as_ref().into()) }
fn state(&self) -> Option<Cow<str>> { self.state.as_ref().map(|c| c.as_ref().into()) }
fn method(&self) -> Option<Cow<str>> |
}
impl<'s> AuthorizationParameter<'s> {
fn invalid() -> Self {
AuthorizationParameter { valid: false, method: None, client_id: None, scope: None,
redirect_url: None, state: None }
}
}
impl AuthorizationFlow {
/// Idempotent data processing, checks formats.
pub fn prepare<W: WebRequest>(incoming: &mut W) -> Result<PreparedAuthorization<W>, W::Error> {
let urldecoded = incoming.query()
.map(extract_parameters)
.unwrap_or_else(|_| AuthorizationParameter::invalid());
Ok(PreparedAuthorization{request: incoming, urldecoded})
}
pub fn handle<'c, Req>(granter: CodeRef<'c>, prepared: PreparedAuthorization<'c, Req>, page_handler: &OwnerAuthorizer<Request=Req>)
-> Result<Req::Response, Req::Error> where
Req: WebRequest,
{
let PreparedAuthorization { request: req, urldecoded } = prepared;
let negotiated = match granter.negotiate(&urldecoded) {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
let authorization = match page_handler.get_owner_authorization(req, negotiated.pre_grant())? {
(Authentication::Failed, _)
=> negotiated.deny(),
(Authentication::InProgress, response)
=> return Ok(response),
(Authentication::Authenticated(owner), _)
=> negotiated.authorize(owner.into()),
};
let redirect_to = match authorization {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
Req::Response::redirect(redirect_to)
}
}
pub struct GrantFlow;
pub struct PreparedGrant<'l, Req> where
Req: WebRequest + 'l,
{
params: AccessTokenParameter<'l>,
req: PhantomData<Req>,
}
fn extract_access_token<'l>(params: &'l HashMap<String, Vec<String>>) -> AccessTokenParameter<'l> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<_, _>>();
AccessTokenParameter {
valid: true,
client_id: map.get("client_id").map(|v| (*v).into()),
code: map.get("code").map(|v| (*v).into()),
redirect_url: map.get("redirect_url").map(|v| (*v).into()),
grant_type: map.get("grant_type").map(|v| (*v).into()),
authorization: None,
}
}
impl<'l> AccessTokenRequest for AccessTokenParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn code(&self) -> Option<Cow<str>> { self.code.clone() }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.clone() }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.clone() }
fn grant_type(&self) -> Option<Cow<str>> { self.grant_type.clone() }
fn authorization(&self) -> Option<(Cow<str>, Cow<[u8]>)> {
match self.authorization {
None => None,
Some((ref id, ref pass))
=> Some((id.as_str().into(), pass.as_slice().into())),
}
}
}
impl<'l> AccessTokenParameter<'l> {
fn invalid() -> Self {
AccessTokenParameter { valid: false, code: None, client_id: None, redirect_url: None,
grant_type: None, authorization: None }
}
}
impl GrantFlow {
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedGrant<W>, W::Error> {
let params = GrantFlow::create_valid_params(req)
.unwrap_or(AccessTokenParameter::invalid());
Ok(PreparedGrant { params: params, req: PhantomData })
}
fn create_valid_params<'a, W: WebRequest>(req: &'a mut W) -> Option<AccessTokenParameter<'a>> {
let authorization = match req.authheader() {
Err(_) => return None,
Ok(None) => None,
Ok(Some(ref header)) => {
if!header.starts_with("Basic ") {
return None
}
let combined = match base64::decode(&header[6..]) {
Err(_) => return None,
Ok(vec) => vec,
};
let mut split = combined.splitn(2, |&c| c == b':');
let client_bin = match split.next() {
None => return None,
Some(client) => client,
};
let passwd = match split.next() {
None => return None,
Some(passwd64) => passwd64,
};
let client = match from_utf8(client_bin) {
Err(_) => return None,
Ok(client) => client,
};
Some((client.to_string(), passwd.to_vec()))
},
};
let mut params = match req.urlbody() {
Err(_) => return None,
Ok(body) => extract_access_token(body),
};
params.authorization = authorization;
Some(params)
}
pub fn handle<Req>(mut issuer: IssuerRef, prepared: PreparedGrant<Req>)
-> Result<Req::Response, Req::Error> where Req: WebRequest
{
let PreparedGrant { params,.. } = prepared;
match issuer.use_code(¶ms) {
Err(IssuerError::Invalid(json_data))
=> return Req::Response::json(&json_data.to_json())?.as_client_error(),
Err(IssuerError::Unauthorized(json_data, scheme))
=> return Req::Response::json(&json_data.to_json())?.as_unauthorized()?.with_authorization(&scheme),
Ok(token) => Req::Response::json(&token.to_json()),
}
}
}
pub struct AccessFlow;
pub struct PreparedAccess<'l, Req> where
Req: WebRequest + 'l,
{
params: GuardParameter<'l>,
req: PhantomData<Req>,
}
impl<'l> GuardRequest for GuardParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn token(&self) -> Option<Cow<str>> { self.token.clone() }
}
impl<'l> GuardParameter<'l> {
fn invalid() -> Self {
GuardParameter { valid: false, token: None }
}
}
impl AccessFlow {
fn create_valid_params<W: WebRequest>(req: &mut W) -> Option<GuardParameter> {
let token = match req.authheader() {
Err(_) => return None,
Ok(None) => None,
Ok(Some(header)) => {
if!header.starts_with("Bearer ") {
return None
}
match header {
Cow::Borrowed(v) => Some(Cow::Borrowed(&v[7..])),
Cow::Owned(v) => Some(Cow::Owned(v[7..].to_string())),
}
}
};
Some(GuardParameter { valid: true, token })
}
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedAccess<W>, W::Error> {
let params = AccessFlow::create_valid_params(req)
.unwrap_or_else(|| GuardParameter::invalid());
Ok(PreparedAccess { params: params, req: PhantomData })
}
pub fn handle<Req>(guard: GuardRef, prepared: PreparedAccess<Req>)
-> Result<(), Req::Error> where Req: WebRequest {
guard.protect(&prepared.params).map_err(|err| {
match err {
AccessError::InvalidRequest => OAuthError::InternalAccessError(),
AccessError::AccessDenied => OAuthError::AccessDenied,
}.into()
})
}
}
/// Errors which should not or need not be communicated to the requesting party but which are of
/// interest to the server. See the documentation for each enum variant for more documentation on
/// each as some may have an expected response. These include badly formatted headers or url encoded
/// body, unexpected parameters, or security relevant required parameters.
#[derive(Debug)]
pub enum OAuthError {
InternalCodeError(),
InternalAccessError(),
AccessDenied,
}
impl fmt::Display for OAuthError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
fmt.write_str("OAuthError")
}
}
impl error::Error for OAuthError {
fn description(&self) -> &str {
"OAuthError"
}
}
| { self.method.as_ref().map(|c| c.as_ref().into()) } | identifier_body |
frontend.rs | //! General algorithms for frontends.
//!
//! The frontend is concerned with executing the abstract behaviours given by the backend in terms
//! of the actions of the frontend types. This means translating Redirect errors to the correct
//! Redirect http response for example or optionally sending internal errors to loggers.
//!
//! To ensure the adherence to the oauth2 rfc and the improve general implementations, some control
//! flow of incoming packets is specified here instead of the frontend implementations.
//! Instead, traits are offered to make this compatible with other frontends. In theory, this makes
//! the frontend pluggable which could improve testing.
use std::borrow::Cow;
use std::collections::HashMap;
use std::error;
use std::fmt;
use std::marker::PhantomData;
use std::str::from_utf8;
use primitives::registrar::PreGrant;
use super::backend::{AccessTokenRequest, CodeRef, CodeRequest, CodeError, ErrorUrl, IssuerError, IssuerRef};
use super::backend::{AccessError, GuardRequest, GuardRef};
use url::Url;
use base64;
/// Holds the decode query fragments from the url
struct AuthorizationParameter<'a> {
valid: bool,
method: Option<Cow<'a, str>>,
client_id: Option<Cow<'a, str>>,
scope: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
state: Option<Cow<'a, str>>,
}
/// Answer from OwnerAuthorizer to indicate the owners choice.
#[derive(Clone)]
pub enum Authentication {
Failed,
InProgress,
Authenticated(String),
}
struct AccessTokenParameter<'a> {
valid: bool,
client_id: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
grant_type: Option<Cow<'a, str>>,
code: Option<Cow<'a, str>>,
authorization: Option<(String, Vec<u8>)>,
}
struct GuardParameter<'a> {
valid: bool,
token: Option<Cow<'a, str>>,
}
/// Abstraction of web requests with several different abstractions and constructors needed by this
/// frontend. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error: From<OAuthError>;
type Response: WebResponse<Error=Self::Error>;
/// Retrieve a parsed version of the url query. An Err return value indicates a malformed query
/// or an otherwise malformed WebRequest. Note that an empty query should result in
/// `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<HashMap<String, Vec<String>>, ()>;
/// Retriev the parsed `application/x-form-urlencoded` body of the request. An Err value
/// indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<&HashMap<String, Vec<String>>, ()>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, ()>;
}
/// Response representation into which the Request is transformed by the code_grant types.
pub trait WebResponse where Self: Sized {
/// The error generated when trying to construct an unhandled or invalid response.
type Error: From<OAuthError>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(url: Url) -> Result<Self, Self::Error>;
/// A pure text response with no special media type set.
fn text(text: &str) -> Result<Self, Self::Error>;
/// Json repsonse data, with media type `aplication/json.
fn json(data: &str) -> Result<Self, Self::Error>;
/// Construct a redirect for the error. Here the response may choose to augment the error with
/// additional information (such as help websites, description strings), hence the default
/// implementation which does not do any of that.
fn redirect_error(target: ErrorUrl) -> Result<Self, Self::Error> {
Self::redirect(target.into())
}
/// Set the response status to 400
fn as_client_error(self) -> Result<Self, Self::Error>;
/// Set the response status to 401
fn as_unauthorized(self) -> Result<Self, Self::Error>;
/// Add an Authorization header
fn with_authorization(self, kind: &str) -> Result<Self, Self::Error>;
}
pub trait OwnerAuthorizer {
type Request: WebRequest;
fn get_owner_authorization(&self, &mut Self::Request, &PreGrant)
-> Result<(Authentication, <Self::Request as WebRequest>::Response), <Self::Request as WebRequest>::Error>;
}
pub struct AuthorizationFlow;
pub struct | <'l, Req> where
Req: WebRequest + 'l,
{
request: &'l mut Req,
urldecoded: AuthorizationParameter<'l>,
}
fn extract_parameters(params: HashMap<String, Vec<String>>) -> AuthorizationParameter<'static> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<&str, &str>>();
AuthorizationParameter{
valid: true,
method: map.get("response_type").map(|method| method.to_string().into()),
client_id: map.get("client_id").map(|client| client.to_string().into()),
scope: map.get("scope").map(|scope| scope.to_string().into()),
redirect_url: map.get("redirect_url").map(|url| url.to_string().into()),
state: map.get("state").map(|state| state.to_string().into()),
}
}
impl<'s> CodeRequest for AuthorizationParameter<'s> {
fn valid(&self) -> bool { self.valid }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.as_ref().map(|c| c.as_ref().into()) }
fn scope(&self) -> Option<Cow<str>> { self.scope.as_ref().map(|c| c.as_ref().into()) }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.as_ref().map(|c| c.as_ref().into()) }
fn state(&self) -> Option<Cow<str>> { self.state.as_ref().map(|c| c.as_ref().into()) }
fn method(&self) -> Option<Cow<str>> { self.method.as_ref().map(|c| c.as_ref().into()) }
}
impl<'s> AuthorizationParameter<'s> {
fn invalid() -> Self {
AuthorizationParameter { valid: false, method: None, client_id: None, scope: None,
redirect_url: None, state: None }
}
}
impl AuthorizationFlow {
/// Idempotent data processing, checks formats.
pub fn prepare<W: WebRequest>(incoming: &mut W) -> Result<PreparedAuthorization<W>, W::Error> {
let urldecoded = incoming.query()
.map(extract_parameters)
.unwrap_or_else(|_| AuthorizationParameter::invalid());
Ok(PreparedAuthorization{request: incoming, urldecoded})
}
pub fn handle<'c, Req>(granter: CodeRef<'c>, prepared: PreparedAuthorization<'c, Req>, page_handler: &OwnerAuthorizer<Request=Req>)
-> Result<Req::Response, Req::Error> where
Req: WebRequest,
{
let PreparedAuthorization { request: req, urldecoded } = prepared;
let negotiated = match granter.negotiate(&urldecoded) {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
let authorization = match page_handler.get_owner_authorization(req, negotiated.pre_grant())? {
(Authentication::Failed, _)
=> negotiated.deny(),
(Authentication::InProgress, response)
=> return Ok(response),
(Authentication::Authenticated(owner), _)
=> negotiated.authorize(owner.into()),
};
let redirect_to = match authorization {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
Req::Response::redirect(redirect_to)
}
}
pub struct GrantFlow;
pub struct PreparedGrant<'l, Req> where
Req: WebRequest + 'l,
{
params: AccessTokenParameter<'l>,
req: PhantomData<Req>,
}
fn extract_access_token<'l>(params: &'l HashMap<String, Vec<String>>) -> AccessTokenParameter<'l> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<_, _>>();
AccessTokenParameter {
valid: true,
client_id: map.get("client_id").map(|v| (*v).into()),
code: map.get("code").map(|v| (*v).into()),
redirect_url: map.get("redirect_url").map(|v| (*v).into()),
grant_type: map.get("grant_type").map(|v| (*v).into()),
authorization: None,
}
}
impl<'l> AccessTokenRequest for AccessTokenParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn code(&self) -> Option<Cow<str>> { self.code.clone() }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.clone() }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.clone() }
fn grant_type(&self) -> Option<Cow<str>> { self.grant_type.clone() }
fn authorization(&self) -> Option<(Cow<str>, Cow<[u8]>)> {
match self.authorization {
None => None,
Some((ref id, ref pass))
=> Some((id.as_str().into(), pass.as_slice().into())),
}
}
}
impl<'l> AccessTokenParameter<'l> {
fn invalid() -> Self {
AccessTokenParameter { valid: false, code: None, client_id: None, redirect_url: None,
grant_type: None, authorization: None }
}
}
impl GrantFlow {
    /// Extract the access-token parameters from the request.
    ///
    /// Never fails: requests with missing or malformed parameters produce an
    /// `AccessTokenParameter::invalid()` marker, which `handle` then rejects.
    pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedGrant<W>, W::Error> {
        let params = GrantFlow::create_valid_params(req)
            .unwrap_or_else(AccessTokenParameter::invalid);
        Ok(PreparedGrant { params, req: PhantomData })
    }

    /// Parse the `Authorization: Basic` header and the url-encoded body into
    /// token-request parameters. Returns `None` on any parse failure.
    fn create_valid_params<'a, W: WebRequest>(req: &'a mut W) -> Option<AccessTokenParameter<'a>> {
        let authorization = match req.authheader() {
            Err(_) => return None,
            Ok(None) => None,
            Ok(Some(ref header)) => {
                // Only the HTTP Basic scheme is accepted for client credentials.
                if !header.starts_with("Basic ") {
                    return None
                }
                let combined = match base64::decode(&header[6..]) {
                    Err(_) => return None,
                    Ok(vec) => vec,
                };
                // Credentials are `client_id:password`, base64-decoded above;
                // split only on the first colon so passwords may contain `:`.
                let mut split = combined.splitn(2, |&c| c == b':');
                let client_bin = match split.next() {
                    None => return None,
                    Some(client) => client,
                };
                let passwd = match split.next() {
                    None => return None,
                    Some(passwd64) => passwd64,
                };
                // The client id must be valid utf-8; the password stays raw bytes.
                let client = match from_utf8(client_bin) {
                    Err(_) => return None,
                    Ok(client) => client,
                };
                Some((client.to_string(), passwd.to_vec()))
            },
        };
        let mut params = match req.urlbody() {
            Err(_) => return None,
            Ok(body) => extract_access_token(body),
        };
        params.authorization = authorization;
        Some(params)
    }

    /// Exchange the prepared parameters for an access token, mapping issuer
    /// failures onto client-error / unauthorized responses.
    pub fn handle<Req>(mut issuer: IssuerRef, prepared: PreparedGrant<Req>)
        -> Result<Req::Response, Req::Error> where Req: WebRequest
    {
        let PreparedGrant { params, .. } = prepared;
        // Fixed extraction artifact: the argument was a garbled `&para;ms`
        // HTML entity; the issuer takes the params by reference.
        match issuer.use_code(&params) {
            Err(IssuerError::Invalid(json_data))
                => return Req::Response::json(&json_data.to_json())?.as_client_error(),
            Err(IssuerError::Unauthorized(json_data, scheme))
                => return Req::Response::json(&json_data.to_json())?.as_unauthorized()?.with_authorization(&scheme),
            Ok(token) => Req::Response::json(&token.to_json()),
        }
    }
}
/// Marker type grouping the resource-guard (token checking) flow functions.
pub struct AccessFlow;

/// Guard parameters extracted from a request, ready to be checked by a guard.
pub struct PreparedAccess<'l, Req> where
    Req: WebRequest + 'l,
{
    // Bearer-token parameter parsed from the Authorization header.
    params: GuardParameter<'l>,
    // Ties the prepared value to the request type without storing the request.
    req: PhantomData<Req>,
}
impl<'l> GuardRequest for GuardParameter<'l> {
    /// Whether the parameters parsed without error.
    fn valid(&self) -> bool { self.valid }
    /// The bearer token presented by the client, if any.
    fn token(&self) -> Option<Cow<str>> { self.token.clone() }
}
impl<'l> GuardParameter<'l> {
    /// Construct the marker value for an unparseable guard request;
    /// `valid: false` causes the guard to deny access.
    fn invalid() -> Self {
        GuardParameter { valid: false, token: None }
    }
}
impl AccessFlow {
    /// Pull the bearer token out of the `Authorization` header.
    ///
    /// Returns `None` when the header is unreadable or uses a non-Bearer
    /// scheme; a missing header still yields valid params with no token.
    fn create_valid_params<W: WebRequest>(req: &mut W) -> Option<GuardParameter> {
        let token = match req.authheader() {
            Err(_) => return None,
            Ok(None) => None,
            Ok(Some(header)) => {
                if !header.starts_with("Bearer ") {
                    return None
                }
                // Keep a borrow when the header itself is borrowed;
                // allocate only for the owned variant.
                let stripped = match header {
                    Cow::Borrowed(raw) => Cow::Borrowed(&raw[7..]),
                    Cow::Owned(raw) => Cow::Owned(raw[7..].to_string()),
                };
                Some(stripped)
            }
        };
        Some(GuardParameter { valid: true, token })
    }

    /// Extract the guard parameters from the request, falling back to the
    /// invalid marker when parsing fails.
    pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedAccess<W>, W::Error> {
        let params = AccessFlow::create_valid_params(req)
            .unwrap_or_else(GuardParameter::invalid);
        Ok(PreparedAccess { params, req: PhantomData })
    }

    /// Check the prepared token against the guard, mapping guard failures
    /// onto the server-side `OAuthError` variants.
    pub fn handle<Req>(guard: GuardRef, prepared: PreparedAccess<Req>)
    -> Result<(), Req::Error> where Req: WebRequest {
        guard.protect(&prepared.params).map_err(|err| {
            let oauth_err = match err {
                AccessError::InvalidRequest => OAuthError::InternalAccessError(),
                AccessError::AccessDenied => OAuthError::AccessDenied,
            };
            oauth_err.into()
        })
    }
}
/// Errors which should not or need not be communicated to the requesting party but which are of
/// interest to the server. See the documentation for each enum variant for more documentation on
/// each as some may have an expected response. These include badly formatted headers or url encoded
/// body, unexpected parameters, or security relevant required parameters.
#[derive(Debug)]
pub enum OAuthError {
    // NOTE(review): not constructed in this chunk — presumably the
    // authorization-code-flow counterpart of `InternalAccessError`; confirm.
    InternalCodeError(),
    // Produced by `AccessFlow::handle` for `AccessError::InvalidRequest`.
    InternalAccessError(),
    // Produced by `AccessFlow::handle` when the guard denies the token.
    AccessDenied,
}
impl fmt::Display for OAuthError {
    /// All variants share one generic user-facing rendering; details stay
    /// server-side by design.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(fmt, "OAuthError")
    }
}
impl error::Error for OAuthError {
    // NOTE(review): `Error::description` is deprecated in favour of `Display`;
    // kept here because removing it would change the rendered description.
    fn description(&self) -> &str {
        "OAuthError"
    }
}
| PreparedAuthorization | identifier_name |
router.rs | //! Top-level semantic block verification for Zebra.
//!
//! Verifies blocks using the [`CheckpointVerifier`] or full [`SemanticBlockVerifier`],
//! depending on the config and block height.
//!
//! # Correctness
//!
//! Block and transaction verification requests should be wrapped in a timeout, because:
//! - checkpoint verification waits for previous blocks, and
//! - full block and transaction verification wait for UTXOs from previous blocks.
//!
//! Otherwise, verification of out-of-order and invalid blocks and transactions can hang
//! indefinitely.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use displaydoc::Display;
use futures::{FutureExt, TryFutureExt};
use thiserror::Error;
use tokio::task::JoinHandle;
use tower::{buffer::Buffer, util::BoxService, Service, ServiceExt};
use tracing::{instrument, Instrument, Span};
use zebra_chain::{
block::{self, Height},
parameters::Network,
};
use zebra_state as zs;
use crate::{
block::{Request, SemanticBlockVerifier, VerifyBlockError},
checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError},
error::TransactionError,
transaction, BoxError, Config,
};
#[cfg(test)]
mod tests;
/// The bound for the chain verifier and transaction verifier buffers.
///
/// We choose the verifier buffer bound based on the maximum number of
/// concurrent verifier users, to avoid contention:
/// - the `ChainSync` block download and verify stream
/// - the `Inbound` block download and verify stream
/// - the `Mempool` transaction download and verify stream
/// - a block miner component, which we might add in future, and
/// - 1 extra slot to avoid contention.
///
/// We deliberately add extra slots, because they only cost a small amount of
/// memory, but missing slots can significantly slow down Zebra.
const VERIFIER_BUFFER_BOUND: usize = 5;
/// The block verifier router routes requests to either the checkpoint verifier or the
/// semantic block verifier, depending on the maximum checkpoint height.
///
/// # Correctness
///
/// Block verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
//
// `S` is the shared state service, `V` the transaction verifier service.
struct BlockVerifierRouter<S, V>
where
    S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    S::Future: Send + 'static,
    V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
        + Send
        + Clone
        + 'static,
    V::Future: Send + 'static,
{
    /// The checkpointing block verifier.
    ///
    /// Always used for blocks before `Canopy`, optionally used for the entire checkpoint list.
    checkpoint: CheckpointVerifier<S>,

    /// The highest permitted checkpoint block.
    ///
    /// This height must be in the `checkpoint` verifier's checkpoint list.
    max_checkpoint_height: block::Height,

    /// The full semantic block verifier, used for blocks after `max_checkpoint_height`.
    block: SemanticBlockVerifier<S, V>,
}
/// An error while semantically verifying a block.
//
// One or both of these error variants are at least 140 bytes
#[derive(Debug, Display, Error)]
#[allow(missing_docs)]
pub enum RouterError {
    /// Block could not be checkpointed
    // Boxed to keep `RouterError` itself small (see the size note above).
    Checkpoint { source: Box<VerifyCheckpointError> },
    /// Block could not be full-verified
    // Boxed to keep `RouterError` itself small (see the size note above).
    Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for RouterError {
    /// Wrap a checkpoint verifier failure, boxing it to keep `RouterError` small.
    fn from(err: VerifyCheckpointError) -> Self {
        let source = Box::new(err);
        RouterError::Checkpoint { source }
    }
}
impl From<VerifyBlockError> for RouterError {
    /// Wrap a full-verification failure, boxing it to keep `RouterError` small.
    fn from(err: VerifyBlockError) -> Self {
        let source = Box::new(err);
        RouterError::Block { source }
    }
}
impl RouterError {
    /// Returns `true` if this is definitely a duplicate request.
    /// Some duplicate requests might not be detected, and therefore return `false`.
    pub fn is_duplicate_request(&self) -> bool {
        // Both variants delegate to their boxed source error.
        match self {
            RouterError::Checkpoint { source } => source.is_duplicate_request(),
            RouterError::Block { source } => source.is_duplicate_request(),
        }
    }
}
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
    S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    S::Future: Send + 'static,
    V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
        + Send
        + Clone
        + 'static,
    V::Future: Send + 'static,
{
    type Response = block::Hash;
    type Error = RouterError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    // Ready only when BOTH inner verifiers are ready, so `call` can route to either.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // CORRECTNESS
        //
        // The current task must be scheduled for wakeup every time we return
        // `Poll::Pending`.
        //
        // If either verifier is unready, this task is scheduled for wakeup when it becomes
        // ready.
        //
        // We acquire checkpoint readiness before block readiness, to avoid an unlikely
        // hang during the checkpoint to block verifier transition. If the checkpoint and
        // block verifiers are contending for the same buffer/batch, we want the checkpoint
        // verifier to win, so that checkpoint verification completes, and block verification
        // can start. (Buffers and batches have multiple slots, so this contention is unlikely.)
        use futures::ready;

        // The chain verifier holds one slot in each verifier, for each concurrent task.
        // Therefore, any shared buffers or batches polled by these verifiers should double
        // their bounds. (For example, the state service buffer.)
        ready!(self.checkpoint.poll_ready(cx))?;
        ready!(self.block.poll_ready(cx))?;
        Poll::Ready(Ok(()))
    }

    // Route by coinbase height: at or below the checkpoint cutoff the block goes to
    // the checkpoint verifier, otherwise to the full semantic verifier.
    fn call(&mut self, request: Request) -> Self::Future {
        let block = request.block();

        match block.coinbase_height() {
            #[cfg(feature = "getblocktemplate-rpcs")]
            // There's currently no known use case for block proposals below the checkpoint height,
            // so it's okay to immediately return an error here.
            Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => {
                async {
                    // TODO: Add a `ValidateProposalError` enum with a `BelowCheckpoint` variant?
                    Err(VerifyBlockError::ValidateProposal(
                        "block proposals must be above checkpoint height".into(),
                    ))?
                }
                .boxed()
            }

            Some(height) if height <= self.max_checkpoint_height => {
                self.checkpoint.call(block).map_err(Into::into).boxed()
            }

            // This also covers blocks with no height, which the block verifier
            // will reject immediately.
            _ => self.block.call(request).map_err(Into::into).boxed(),
        }
    }
}
/// Initialize block and transaction verification services,
/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload`
/// config parameter and if the download is not already started.
///
/// Returns a block verifier, transaction verifier,
/// the Groth16 parameter download task [`JoinHandle`],
/// and the maximum configured checkpoint verification height.
///
/// The consensus configuration is specified by `config`, and the Zcash network
/// to verify blocks for is specified by `network`.
///
/// The block verification service asynchronously performs semantic verification
/// checks. Blocks that pass semantic verification are submitted to the supplied
/// `state_service` for contextual verification before being committed to the chain.
///
/// The transaction verification service asynchronously performs semantic verification
/// checks. Transactions that pass semantic verification return an `Ok` result to the caller.
///
/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed,
/// checks they were downloaded correctly, and loads them into Zebra.
/// (The transaction verifier automatically downloads the parameters on first use.
/// But the parameter downloads can take around 10 minutes.
/// So we pre-download the parameters, to avoid verification timeouts.)
///
/// This function should only be called once for a particular state service.
///
/// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
///
/// # Correctness
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`router`](`crate::router`) module documentation for details.
#[instrument(skip(state_service))]
pub async fn | <S>(
config: Config,
network: Network,
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
>,
BackgroundTaskHandles,
Height,
)
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static,
S::Future: Send +'static,
{
// Give other tasks priority before spawning the download and checkpoint tasks.
tokio::task::yield_now().await;
// Pre-download Groth16 parameters in a separate thread.
// The parameter download thread must be launched before initializing any verifiers.
// Otherwise, the download might happen on the startup thread.
let span = Span::current();
let groth16_download_handle = tokio::task::spawn_blocking(move || {
span.in_scope(|| {
if!debug_skip_parameter_preload {
// The lazy static initializer does the download, if needed,
// and the file hash checks.
lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS);
}
})
});
// Make sure the state contains the known best chain checkpoints, in a separate thread.
let checkpoint_state_service = state_service.clone();
let checkpoint_sync = config.checkpoint_sync;
let state_checkpoint_verify_handle = tokio::task::spawn(
// TODO: move this into an async function?
async move {
tracing::info!("starting state checkpoint validation");
// # Consensus
//
// We want to verify all available checkpoints, even if the node is not configured
// to use them for syncing. Zebra's checkpoints are updated with every release,
// which makes sure they include the latest settled network upgrade.
//
// > A network upgrade is settled on a given network when there is a social
// > consensus that it has activated with a given activation block hash.
// > A full validator that potentially risks Mainnet funds or displays Mainnet
// > transaction information to a user MUST do so only for a block chain that
// > includes the activation block of the most recent settled network upgrade,
// > with the corresponding activation block hash. Currently, there is social
// > consensus that NU5 has activated on the Zcash Mainnet and Testnet with the
// > activation block hashes given in § 3.12 ‘Mainnet and Testnet’ on p. 20.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockchain>
let full_checkpoints = CheckpointList::new(network);
for (height, checkpoint_hash) in full_checkpoints.iter() {
let checkpoint_state_service = checkpoint_state_service.clone();
let request = zebra_state::Request::BestChainBlockHash(*height);
match checkpoint_state_service.oneshot(request).await {
Ok(zebra_state::Response::BlockHash(Some(state_hash))) => assert_eq!(
*checkpoint_hash, state_hash,
"invalid block in state: a previous Zebra instance followed an \
incorrect chain. Delete and re-sync your state to use the best chain"
),
Ok(zebra_state::Response::BlockHash(None)) => {
if checkpoint_sync {
tracing::info!(
"state is not fully synced yet, remaining checkpoints will be \
verified during syncing"
);
} else {
tracing::warn!(
"state is not fully synced yet, remaining checkpoints will be \
verified next time Zebra starts up. Zebra will be less secure \
until it is restarted. Use consensus.checkpoint_sync = true \
in zebrad.toml to make sure you are following a valid chain"
);
}
break;
}
Ok(response) => {
unreachable!("unexpected response type: {response:?} from state request")
}
Err(e) => {
#[cfg(not(test))]
tracing::warn!(
"unexpected error: {e:?} in state request while verifying previous \
state checkpoints. Is Zebra shutting down?"
);
// This error happens a lot in some tests.
//
// TODO: fix the tests so they don't cause this error,
// or change the tracing filter
#[cfg(test)]
tracing::debug!(
"unexpected error: {e:?} in state request while verifying previous \
state checkpoints. Is Zebra shutting down?"
);
}
}
}
tracing::info!("finished state checkpoint validation");
}
.instrument(Span::current()),
);
// transaction verification
let transaction = transaction::Verifier::new(network, state_service.clone());
let transaction = Buffer::new(BoxService::new(transaction), VERIFIER_BUFFER_BOUND);
// block verification
let (list, max_checkpoint_height) = init_checkpoint_list(config, network);
let tip = match state_service
.ready()
.await
.unwrap()
.call(zs::Request::Tip)
.await
.unwrap()
{
zs::Response::Tip(tip) => tip,
_ => unreachable!("wrong response to Request::Tip"),
};
tracing::info!(
?tip,
?max_checkpoint_height,
"initializing block verifier router"
);
let block = SemanticBlockVerifier::new(network, state_service.clone(), transaction.clone());
let checkpoint = CheckpointVerifier::from_checkpoint_list(list, network, tip, state_service);
let router = BlockVerifierRouter {
checkpoint,
max_checkpoint_height,
block,
};
let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND);
let task_handles = BackgroundTaskHandles {
groth16_download_handle,
state_checkpoint_verify_handle,
};
(router, transaction, task_handles, max_checkpoint_height)
}
/// Parses the checkpoint list for `network` and `config`.
/// Returns the checkpoint list and maximum checkpoint height.
pub fn init_checkpoint_list(config: Config, network: Network) -> (CheckpointList, Height) {
    // TODO: Zebra parses the checkpoint list three times at startup.
    //       Instead, cache the checkpoint list for each `network`.
    let list = CheckpointList::new(network);

    // With full checkpoint sync disabled, only checkpoint up to the mandatory
    // height; otherwise use every hardcoded checkpoint.
    let max_checkpoint_height = if !config.checkpoint_sync {
        list.min_height_in_range(network.mandatory_checkpoint_height()..)
            .expect("hardcoded checkpoint list extends past canopy activation")
    } else {
        list.max_height()
    };

    (list, max_checkpoint_height)
}
/// The background task handles for `zebra-consensus` verifier initialization.
#[derive(Debug)]
pub struct BackgroundTaskHandles {
    /// A handle to the Groth16 parameter download task.
    /// Finishes when the parameters are downloaded and their checksums verified.
    // NOTE: dropping a tokio `JoinHandle` detaches the task, it does not cancel it.
    pub groth16_download_handle: JoinHandle<()>,
    /// A handle to the state checkpoint verify task.
    /// Finishes when all the checkpoints are verified, or when the state tip is reached.
    pub state_checkpoint_verify_handle: JoinHandle<()>,
}
| init | identifier_name |
router.rs | //! Top-level semantic block verification for Zebra.
//!
//! Verifies blocks using the [`CheckpointVerifier`] or full [`SemanticBlockVerifier`],
//! depending on the config and block height.
//!
//! # Correctness
//!
//! Block and transaction verification requests should be wrapped in a timeout, because:
//! - checkpoint verification waits for previous blocks, and
//! - full block and transaction verification wait for UTXOs from previous blocks.
//!
//! Otherwise, verification of out-of-order and invalid blocks and transactions can hang
//! indefinitely.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use displaydoc::Display;
use futures::{FutureExt, TryFutureExt};
use thiserror::Error;
use tokio::task::JoinHandle;
use tower::{buffer::Buffer, util::BoxService, Service, ServiceExt};
use tracing::{instrument, Instrument, Span};
use zebra_chain::{
block::{self, Height},
parameters::Network,
};
use zebra_state as zs;
use crate::{
block::{Request, SemanticBlockVerifier, VerifyBlockError},
checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError},
error::TransactionError,
transaction, BoxError, Config,
};
#[cfg(test)]
mod tests;
/// The bound for the chain verifier and transaction verifier buffers.
///
/// We choose the verifier buffer bound based on the maximum number of
/// concurrent verifier users, to avoid contention:
/// - the `ChainSync` block download and verify stream
/// - the `Inbound` block download and verify stream
/// - the `Mempool` transaction download and verify stream
/// - a block miner component, which we might add in future, and
/// - 1 extra slot to avoid contention.
///
/// We deliberately add extra slots, because they only cost a small amount of
/// memory, but missing slots can significantly slow down Zebra.
const VERIFIER_BUFFER_BOUND: usize = 5;
/// The block verifier router routes requests to either the checkpoint verifier or the
/// semantic block verifier, depending on the maximum checkpoint height.
///
/// # Correctness
///
/// Block verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
//
// `S` is the shared state service, `V` the transaction verifier service.
struct BlockVerifierRouter<S, V>
where
    S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    S::Future: Send + 'static,
    V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
        + Send
        + Clone
        + 'static,
    V::Future: Send + 'static,
{
    /// The checkpointing block verifier.
    ///
    /// Always used for blocks before `Canopy`, optionally used for the entire checkpoint list.
    checkpoint: CheckpointVerifier<S>,

    /// The highest permitted checkpoint block.
    ///
    /// This height must be in the `checkpoint` verifier's checkpoint list.
    max_checkpoint_height: block::Height,

    /// The full semantic block verifier, used for blocks after `max_checkpoint_height`.
    block: SemanticBlockVerifier<S, V>,
}
/// An error while semantically verifying a block.
//
// One or both of these error variants are at least 140 bytes
#[derive(Debug, Display, Error)]
#[allow(missing_docs)]
pub enum RouterError {
    /// Block could not be checkpointed
    // Boxed to keep `RouterError` itself small (see the size note above).
    Checkpoint { source: Box<VerifyCheckpointError> },
    /// Block could not be full-verified
    // Boxed to keep `RouterError` itself small (see the size note above).
    Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for RouterError {
    /// Wrap a checkpoint verifier failure, boxing it to keep `RouterError` small.
    fn from(err: VerifyCheckpointError) -> Self {
        let source = Box::new(err);
        RouterError::Checkpoint { source }
    }
}
impl From<VerifyBlockError> for RouterError {
    /// Wrap a full-verification failure, boxing it to keep `RouterError` small.
    fn from(err: VerifyBlockError) -> Self {
        let source = Box::new(err);
        RouterError::Block { source }
    }
}
impl RouterError {
    /// Returns `true` if this is definitely a duplicate request.
    /// Some duplicate requests might not be detected, and therefore return `false`.
    pub fn is_duplicate_request(&self) -> bool {
        // Both variants delegate to their boxed source error.
        match self {
            RouterError::Checkpoint { source } => source.is_duplicate_request(),
            RouterError::Block { source } => source.is_duplicate_request(),
        }
    }
}
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
    S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    S::Future: Send + 'static,
    V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
        + Send
        + Clone
        + 'static,
    V::Future: Send + 'static,
{
    type Response = block::Hash;
    type Error = RouterError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    // Ready only when BOTH inner verifiers are ready, so `call` can route to either.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // CORRECTNESS
        //
        // The current task must be scheduled for wakeup every time we return
        // `Poll::Pending`.
        //
        // If either verifier is unready, this task is scheduled for wakeup when it becomes
        // ready.
        //
        // We acquire checkpoint readiness before block readiness, to avoid an unlikely
        // hang during the checkpoint to block verifier transition. If the checkpoint and
        // block verifiers are contending for the same buffer/batch, we want the checkpoint
        // verifier to win, so that checkpoint verification completes, and block verification
        // can start. (Buffers and batches have multiple slots, so this contention is unlikely.)
        use futures::ready;

        // The chain verifier holds one slot in each verifier, for each concurrent task.
        // Therefore, any shared buffers or batches polled by these verifiers should double
        // their bounds. (For example, the state service buffer.)
        ready!(self.checkpoint.poll_ready(cx))?;
        ready!(self.block.poll_ready(cx))?;
        Poll::Ready(Ok(()))
    }

    // Route by coinbase height: at or below the checkpoint cutoff the block goes to
    // the checkpoint verifier, otherwise to the full semantic verifier.
    fn call(&mut self, request: Request) -> Self::Future {
        let block = request.block();

        match block.coinbase_height() {
            #[cfg(feature = "getblocktemplate-rpcs")]
            // There's currently no known use case for block proposals below the checkpoint height,
            // so it's okay to immediately return an error here.
            Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => {
                async {
                    // TODO: Add a `ValidateProposalError` enum with a `BelowCheckpoint` variant?
                    Err(VerifyBlockError::ValidateProposal(
                        "block proposals must be above checkpoint height".into(),
                    ))?
                }
                .boxed()
            }

            Some(height) if height <= self.max_checkpoint_height => {
                self.checkpoint.call(block).map_err(Into::into).boxed()
            }

            // This also covers blocks with no height, which the block verifier
            // will reject immediately.
            _ => self.block.call(request).map_err(Into::into).boxed(),
        }
    }
}
/// Initialize block and transaction verification services,
/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload`
/// config parameter and if the download is not already started.
///
/// Returns a block verifier, transaction verifier,
/// the Groth16 parameter download task [`JoinHandle`],
/// and the maximum configured checkpoint verification height.
///
/// The consensus configuration is specified by `config`, and the Zcash network
/// to verify blocks for is specified by `network`.
///
/// The block verification service asynchronously performs semantic verification
/// checks. Blocks that pass semantic verification are submitted to the supplied
/// `state_service` for contextual verification before being committed to the chain.
///
/// The transaction verification service asynchronously performs semantic verification
/// checks. Transactions that pass semantic verification return an `Ok` result to the caller.
///
/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed,
/// checks they were downloaded correctly, and loads them into Zebra.
/// (The transaction verifier automatically downloads the parameters on first use.
/// But the parameter downloads can take around 10 minutes.
/// So we pre-download the parameters, to avoid verification timeouts.)
///
/// This function should only be called once for a particular state service.
///
/// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
///
/// # Correctness
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`router`](`crate::router`) module documentation for details.
#[instrument(skip(state_service))]
pub async fn init<S>(
config: Config,
network: Network,
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
>,
BackgroundTaskHandles,
Height,
)
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static,
S::Future: Send +'static,
{
// Give other tasks priority before spawning the download and checkpoint tasks.
tokio::task::yield_now().await;
// Pre-download Groth16 parameters in a separate thread.
// The parameter download thread must be launched before initializing any verifiers.
// Otherwise, the download might happen on the startup thread.
let span = Span::current();
let groth16_download_handle = tokio::task::spawn_blocking(move || {
span.in_scope(|| {
if!debug_skip_parameter_preload {
// The lazy static initializer does the download, if needed,
// and the file hash checks.
lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS);
}
})
});
// Make sure the state contains the known best chain checkpoints, in a separate thread.
let checkpoint_state_service = state_service.clone();
let checkpoint_sync = config.checkpoint_sync;
let state_checkpoint_verify_handle = tokio::task::spawn(
// TODO: move this into an async function?
async move {
tracing::info!("starting state checkpoint validation");
// # Consensus
//
// We want to verify all available checkpoints, even if the node is not configured
// to use them for syncing. Zebra's checkpoints are updated with every release,
// which makes sure they include the latest settled network upgrade.
//
// > A network upgrade is settled on a given network when there is a social
// > consensus that it has activated with a given activation block hash.
// > A full validator that potentially risks Mainnet funds or displays Mainnet
// > transaction information to a user MUST do so only for a block chain that
// > includes the activation block of the most recent settled network upgrade,
// > with the corresponding activation block hash. Currently, there is social
// > consensus that NU5 has activated on the Zcash Mainnet and Testnet with the
// > activation block hashes given in § 3.12 ‘Mainnet and Testnet’ on p. 20.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockchain>
let full_checkpoints = CheckpointList::new(network);
for (height, checkpoint_hash) in full_checkpoints.iter() {
let checkpoint_state_service = checkpoint_state_service.clone();
let request = zebra_state::Request::BestChainBlockHash(*height);
match checkpoint_state_service.oneshot(request).await {
Ok(zebra_state::Response::BlockHash(Some(state_hash))) => assert_eq!(
*checkpoint_hash, state_hash,
"invalid block in state: a previous Zebra instance followed an \
incorrect chain. Delete and re-sync your state to use the best chain"
),
Ok(zebra_state::Response::BlockHash(None)) => {
if checkpoint_sync {
tracing::info!(
"state is not fully synced yet, remaining checkpoints will be \
verified during syncing"
);
} else {
tracing::warn!(
"state is not fully synced yet, remaining checkpoints will be \
verified next time Zebra starts up. Zebra will be less secure \
until it is restarted. Use consensus.checkpoint_sync = true \
in zebrad.toml to make sure you are following a valid chain"
);
}
break;
}
Ok(response) => {
| Err(e) => {
#[cfg(not(test))]
tracing::warn!(
"unexpected error: {e:?} in state request while verifying previous \
state checkpoints. Is Zebra shutting down?"
);
// This error happens a lot in some tests.
//
// TODO: fix the tests so they don't cause this error,
// or change the tracing filter
#[cfg(test)]
tracing::debug!(
"unexpected error: {e:?} in state request while verifying previous \
state checkpoints. Is Zebra shutting down?"
);
}
}
}
tracing::info!("finished state checkpoint validation");
}
.instrument(Span::current()),
);
// transaction verification
let transaction = transaction::Verifier::new(network, state_service.clone());
let transaction = Buffer::new(BoxService::new(transaction), VERIFIER_BUFFER_BOUND);
// block verification
let (list, max_checkpoint_height) = init_checkpoint_list(config, network);
let tip = match state_service
.ready()
.await
.unwrap()
.call(zs::Request::Tip)
.await
.unwrap()
{
zs::Response::Tip(tip) => tip,
_ => unreachable!("wrong response to Request::Tip"),
};
tracing::info!(
?tip,
?max_checkpoint_height,
"initializing block verifier router"
);
let block = SemanticBlockVerifier::new(network, state_service.clone(), transaction.clone());
let checkpoint = CheckpointVerifier::from_checkpoint_list(list, network, tip, state_service);
let router = BlockVerifierRouter {
checkpoint,
max_checkpoint_height,
block,
};
let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND);
let task_handles = BackgroundTaskHandles {
groth16_download_handle,
state_checkpoint_verify_handle,
};
(router, transaction, task_handles, max_checkpoint_height)
}
/// Parses the checkpoint list for `network` and `config`.
/// Returns the checkpoint list and maximum checkpoint height.
pub fn init_checkpoint_list(config: Config, network: Network) -> (CheckpointList, Height) {
    // TODO: Zebra parses the checkpoint list three times at startup.
    //       Instead, cache the checkpoint list for each `network`.
    let list = CheckpointList::new(network);

    // With full checkpoint sync disabled, only checkpoint up to the mandatory
    // height; otherwise use every hardcoded checkpoint.
    let max_checkpoint_height = if !config.checkpoint_sync {
        list.min_height_in_range(network.mandatory_checkpoint_height()..)
            .expect("hardcoded checkpoint list extends past canopy activation")
    } else {
        list.max_height()
    };

    (list, max_checkpoint_height)
}
/// The background task handles for `zebra-consensus` verifier initialization.
#[derive(Debug)]
pub struct BackgroundTaskHandles {
    /// A handle to the Groth16 parameter download task.
    /// Finishes when the parameters are downloaded and their checksums verified.
    // NOTE: dropping a tokio `JoinHandle` detaches the task, it does not cancel it.
    pub groth16_download_handle: JoinHandle<()>,
    /// A handle to the state checkpoint verify task.
    /// Finishes when all the checkpoints are verified, or when the state tip is reached.
    pub state_checkpoint_verify_handle: JoinHandle<()>,
}
| unreachable!("unexpected response type: {response:?} from state request")
}
| conditional_block |
router.rs | //! Top-level semantic block verification for Zebra.
//!
//! Verifies blocks using the [`CheckpointVerifier`] or full [`SemanticBlockVerifier`],
//! depending on the config and block height.
//!
//! # Correctness
//!
//! Block and transaction verification requests should be wrapped in a timeout, because:
//! - checkpoint verification waits for previous blocks, and
//! - full block and transaction verification wait for UTXOs from previous blocks.
//!
//! Otherwise, verification of out-of-order and invalid blocks and transactions can hang
//! indefinitely.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use displaydoc::Display;
use futures::{FutureExt, TryFutureExt};
use thiserror::Error;
use tokio::task::JoinHandle;
use tower::{buffer::Buffer, util::BoxService, Service, ServiceExt};
use tracing::{instrument, Instrument, Span};
use zebra_chain::{
block::{self, Height},
parameters::Network,
};
use zebra_state as zs;
use crate::{
block::{Request, SemanticBlockVerifier, VerifyBlockError},
checkpoint::{CheckpointList, CheckpointVerifier, VerifyCheckpointError},
error::TransactionError,
transaction, BoxError, Config,
};
#[cfg(test)]
mod tests;
/// The bound for the chain verifier and transaction verifier buffers.
///
/// We choose the verifier buffer bound based on the maximum number of
/// concurrent verifier users, to avoid contention:
/// - the `ChainSync` block download and verify stream
/// - the `Inbound` block download and verify stream
/// - the `Mempool` transaction download and verify stream
/// - a block miner component, which we might add in future, and
/// - 1 extra slot to avoid contention.
///
/// We deliberately add extra slots, because they only cost a small amount of
/// memory, but missing slots can significantly slow down Zebra.
const VERIFIER_BUFFER_BOUND: usize = 5;
/// The block verifier router routes requests to either the checkpoint verifier or the
/// semantic block verifier, depending on the maximum checkpoint height.
///
/// Requests at or below `max_checkpoint_height` go to the `checkpoint` verifier;
/// all other requests go to the full semantic `block` verifier.
///
/// # Correctness
///
/// Block verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
struct BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static,
S::Future: Send +'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+'static,
V::Future: Send +'static,
{
/// The checkpointing block verifier.
///
/// Always used for blocks before `Canopy`, optionally used for the entire checkpoint list.
checkpoint: CheckpointVerifier<S>,
/// The highest permitted checkpoint block.
///
/// This height must be in the `checkpoint` verifier's checkpoint list.
max_checkpoint_height: block::Height,
/// The full semantic block verifier, used for blocks after `max_checkpoint_height`.
block: SemanticBlockVerifier<S, V>,
}
/// An error while semantically verifying a block.
//
// One or both of these error variants are at least 140 bytes,
// so both sources are boxed to keep `Result<_, RouterError>` small.
#[derive(Debug, Display, Error)]
#[allow(missing_docs)]
pub enum RouterError {
/// Block could not be checkpointed
Checkpoint { source: Box<VerifyCheckpointError> },
/// Block could not be full-verified
Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for RouterError {
fn from(err: VerifyCheckpointError) -> Self {
RouterError::Checkpoint {
source: Box::new(err),
}
}
}
impl From<VerifyBlockError> for RouterError {
fn from(err: VerifyBlockError) -> Self {
RouterError::Block {
source: Box::new(err),
}
}
}
impl RouterError {
/// Returns `true` if this is definitely a duplicate request.
/// Some duplicate requests might not be detected, and therefore return `false`.
pub fn is_duplicate_request(&self) -> bool {
match self {
RouterError::Checkpoint { source,.. } => source.is_duplicate_request(),
RouterError::Block { source,.. } => source.is_duplicate_request(),
}
}
}
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static,
S::Future: Send +'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+'static,
V::Future: Send +'static,
{
type Response = block::Hash;
type Error = RouterError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send +'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// CORRECTNESS
//
// The current task must be scheduled for wakeup every time we return
// `Poll::Pending`.
//
// If either verifier is unready, this task is scheduled for wakeup when it becomes
// ready.
//
// We acquire checkpoint readiness before block readiness, to avoid an unlikely
// hang during the checkpoint to block verifier transition. If the checkpoint and
// block verifiers are contending for the same buffer/batch, we want the checkpoint
// verifier to win, so that checkpoint verification completes, and block verification
// can start. (Buffers and batches have multiple slots, so this contention is unlikely.)
use futures::ready;
// The chain verifier holds one slot in each verifier, for each concurrent task.
// Therefore, any shared buffers or batches polled by these verifiers should double
// their bounds. (For example, the state service buffer.)
ready!(self.checkpoint.poll_ready(cx))?;
ready!(self.block.poll_ready(cx))?;
Poll::Ready(Ok(()))
}
// Routes each request by coinbase height: at or below `max_checkpoint_height`
// it goes to the checkpoint verifier, otherwise to the semantic block verifier.
fn call(&mut self, request: Request) -> Self::Future {
let block = request.block();
match block.coinbase_height() {
#[cfg(feature = "getblocktemplate-rpcs")]
// There's currently no known use case for block proposals below the checkpoint height,
// so it's okay to immediately return an error here.
Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => {
async {
// TODO: Add a `ValidateProposalError` enum with a `BelowCheckpoint` variant?
Err(VerifyBlockError::ValidateProposal(
"block proposals must be above checkpoint height".into(),
))?
}
.boxed()
}
// Checkpointable heights are verified against the hardcoded checkpoint list.
Some(height) if height <= self.max_checkpoint_height => {
self.checkpoint.call(block).map_err(Into::into).boxed()
}
// This also covers blocks with no height, which the block verifier
// will reject immediately.
_ => self.block.call(request).map_err(Into::into).boxed(),
}
}
}
/// Initialize block and transaction verification services,
/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload`
/// config parameter and if the download is not already started.
///
/// Returns a block verifier, transaction verifier,
/// the Groth16 parameter download task [`JoinHandle`],
/// and the maximum configured checkpoint verification height.
///
/// The consensus configuration is specified by `config`, and the Zcash network
/// to verify blocks for is specified by `network`.
///
/// The block verification service asynchronously performs semantic verification
/// checks. Blocks that pass semantic verification are submitted to the supplied
/// `state_service` for contextual verification before being committed to the chain.
///
/// The transaction verification service asynchronously performs semantic verification
/// checks. Transactions that pass semantic verification return an `Ok` result to the caller.
///
/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed,
/// checks they were downloaded correctly, and loads them into Zebra.
/// (The transaction verifier automatically downloads the parameters on first use.
/// But the parameter downloads can take around 10 minutes.
/// So we pre-download the parameters, to avoid verification timeouts.)
///
/// This function should only be called once for a particular state service.
///
/// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
///
/// # Correctness
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`router`](`crate::router`) module documentation for details.
#[instrument(skip(state_service))]
pub async fn init<S>(
config: Config,
network: Network,
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
>,
BackgroundTaskHandles,
Height,
)
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static,
S::Future: Send +'static,
{
// Give other tasks priority before spawning the download and checkpoint tasks.
tokio::task::yield_now().await;
// Pre-download Groth16 parameters in a separate thread.
// The parameter download thread must be launched before initializing any verifiers.
// Otherwise, the download might happen on the startup thread.
let span = Span::current();
let groth16_download_handle = tokio::task::spawn_blocking(move || {
span.in_scope(|| {
if!debug_skip_parameter_preload {
// The lazy static initializer does the download, if needed,
// and the file hash checks.
lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS);
}
})
});
// Make sure the state contains the known best chain checkpoints, in a separate thread.
let checkpoint_state_service = state_service.clone();
let checkpoint_sync = config.checkpoint_sync;
let state_checkpoint_verify_handle = tokio::task::spawn(
// TODO: move this into an async function?
async move { |
// # Consensus
//
// We want to verify all available checkpoints, even if the node is not configured
// to use them for syncing. Zebra's checkpoints are updated with every release,
// which makes sure they include the latest settled network upgrade.
//
// > A network upgrade is settled on a given network when there is a social
// > consensus that it has activated with a given activation block hash.
// > A full validator that potentially risks Mainnet funds or displays Mainnet
// > transaction information to a user MUST do so only for a block chain that
// > includes the activation block of the most recent settled network upgrade,
// > with the corresponding activation block hash. Currently, there is social
// > consensus that NU5 has activated on the Zcash Mainnet and Testnet with the
// > activation block hashes given in § 3.12 ‘Mainnet and Testnet’ on p. 20.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockchain>
let full_checkpoints = CheckpointList::new(network);
for (height, checkpoint_hash) in full_checkpoints.iter() {
let checkpoint_state_service = checkpoint_state_service.clone();
let request = zebra_state::Request::BestChainBlockHash(*height);
match checkpoint_state_service.oneshot(request).await {
Ok(zebra_state::Response::BlockHash(Some(state_hash))) => assert_eq!(
*checkpoint_hash, state_hash,
"invalid block in state: a previous Zebra instance followed an \
incorrect chain. Delete and re-sync your state to use the best chain"
),
Ok(zebra_state::Response::BlockHash(None)) => {
if checkpoint_sync {
tracing::info!(
"state is not fully synced yet, remaining checkpoints will be \
verified during syncing"
);
} else {
tracing::warn!(
"state is not fully synced yet, remaining checkpoints will be \
verified next time Zebra starts up. Zebra will be less secure \
until it is restarted. Use consensus.checkpoint_sync = true \
in zebrad.toml to make sure you are following a valid chain"
);
}
break;
}
Ok(response) => {
unreachable!("unexpected response type: {response:?} from state request")
}
Err(e) => {
#[cfg(not(test))]
tracing::warn!(
"unexpected error: {e:?} in state request while verifying previous \
state checkpoints. Is Zebra shutting down?"
);
// This error happens a lot in some tests.
//
// TODO: fix the tests so they don't cause this error,
// or change the tracing filter
#[cfg(test)]
tracing::debug!(
"unexpected error: {e:?} in state request while verifying previous \
state checkpoints. Is Zebra shutting down?"
);
}
}
}
tracing::info!("finished state checkpoint validation");
}
.instrument(Span::current()),
);
// transaction verification
let transaction = transaction::Verifier::new(network, state_service.clone());
let transaction = Buffer::new(BoxService::new(transaction), VERIFIER_BUFFER_BOUND);
// block verification
let (list, max_checkpoint_height) = init_checkpoint_list(config, network);
let tip = match state_service
.ready()
.await
.unwrap()
.call(zs::Request::Tip)
.await
.unwrap()
{
zs::Response::Tip(tip) => tip,
_ => unreachable!("wrong response to Request::Tip"),
};
tracing::info!(
?tip,
?max_checkpoint_height,
"initializing block verifier router"
);
let block = SemanticBlockVerifier::new(network, state_service.clone(), transaction.clone());
let checkpoint = CheckpointVerifier::from_checkpoint_list(list, network, tip, state_service);
let router = BlockVerifierRouter {
checkpoint,
max_checkpoint_height,
block,
};
let router = Buffer::new(BoxService::new(router), VERIFIER_BUFFER_BOUND);
let task_handles = BackgroundTaskHandles {
groth16_download_handle,
state_checkpoint_verify_handle,
};
(router, transaction, task_handles, max_checkpoint_height)
}
/// Parses the checkpoint list for `network` and `config`.
/// Returns the checkpoint list and maximum checkpoint height.
pub fn init_checkpoint_list(config: Config, network: Network) -> (CheckpointList, Height) {
    // TODO: Zebra parses the checkpoint list three times at startup.
    // Instead, cache the checkpoint list for each `network`.
    let list = CheckpointList::new(network);

    // With full checkpoint sync, every hardcoded checkpoint is used; otherwise
    // only the checkpoints up to the mandatory checkpoint height are used.
    let max_checkpoint_height = match config.checkpoint_sync {
        true => list.max_height(),
        false => list
            .min_height_in_range(network.mandatory_checkpoint_height()..)
            .expect("hardcoded checkpoint list extends past canopy activation"),
    };

    (list, max_checkpoint_height)
}
/// The background task handles for `zebra-consensus` verifier initialization.
#[derive(Debug)]
pub struct BackgroundTaskHandles {
/// A handle to the Groth16 parameter download task.
/// Finishes when the parameters are downloaded and their checksums verified.
pub groth16_download_handle: JoinHandle<()>,
/// A handle to the state checkpoint verify task.
/// Finishes when all the checkpoints are verified, or when the state tip is reached.
pub state_checkpoint_verify_handle: JoinHandle<()>,
} | tracing::info!("starting state checkpoint validation"); | random_line_split |
javascript.rs | mod expression;
mod pattern;
#[cfg(test)]
mod tests;
use crate::{ast::*, error::GleamExpect, fs::Utf8Writer, line_numbers::LineNumbers, pretty::*};
use itertools::Itertools;
const INDENT: isize = 2;
const DEEP_EQUAL: &str = "
function $equal(x, y) {
let toCheck = [x, y];
while (toCheck) {
let a = toCheck.pop();
let b = toCheck.pop();
if (a === b) return true;
if (!$is_object(a) ||!$is_object(b)) return false;
for (let k of Object.keys(a)) {
toCheck.push(a[k], b[k]);
}
}
return true;
}
function $is_object(object) {
return object!== null && typeof object === 'object';
}";
const FUNCTION_DIVIDE: &str = "
function $divide(a, b) {
if (b === 0) return 0;
return a / b;
}";
pub type Output<'a> = Result<Document<'a>, Error>;
#[derive(Debug)]
pub struct Generator<'a> {
line_numbers: &'a LineNumbers,
module: &'a TypedModule,
float_division_used: bool,
object_equality_used: bool,
module_scope: im::HashMap<String, usize>,
}
impl<'a> Generator<'a> {
pub fn new(line_numbers: &'a LineNumbers, module: &'a TypedModule) -> Self {
Self {
line_numbers,
module,
float_division_used: false,
object_equality_used: false,
module_scope: Default::default(),
}
}
pub fn compile(&mut self) -> Output<'a> {
let statements = std::iter::once(Ok(r#""use strict";"#.to_doc())).chain(
self.module
.statements
.iter()
.flat_map(|s| self.statement(s)),
);
// Two lines between each statement
let statements = Itertools::intersperse(statements, Ok(lines(2)));
let mut statements = statements.collect::<Result<Vec<_>, _>>()?;
// If float division has been used render an appropriate function
if self.float_division_used {
statements.push(FUNCTION_DIVIDE.to_doc());
};
if self.object_equality_used {
statements.push(DEEP_EQUAL.to_doc());
};
statements.push(line());
Ok(statements.to_doc())
}
pub fn statement(&mut self, statement: &'a TypedStatement) -> Option<Output<'a>> {
match statement {
Statement::TypeAlias {.. } => None,
Statement::CustomType {.. } => None,
Statement::Import {
module,
as_name,
unqualified,
package,
..
} => Some(Ok(self.import(package, module, as_name, unqualified))),
Statement::ExternalType {.. } => None,
Statement::ModuleConstant {
public,
name,
value,
..
} => Some(self.module_constant(*public, name, value)),
Statement::Fn {
arguments,
name,
body,
public,
..
} => Some(self.module_function(*public, name, arguments, body)),
Statement::ExternalFn {
public,
name,
arguments,
module,
fun,
..
} => Some(Ok(
self.external_function(*public, name, arguments, module, fun)
)),
}
}
fn import_path(&mut self, package: &'a str, module: &'a [String]) -> Document<'a> {
let path = Document::String(module.join("/"));
if package == self.module.type_info.package {
// Same package uses relative paths
let prefix = match self.module.name.len() {
1 => "./".to_doc(),
_ => Document::String("../".repeat(module.len() - 1)),
};
docvec!["\"", prefix, path, ".js\""]
} else {
// Different packages uses absolute imports
docvec!["\"", package, "/", path, ".js\""]
}
}
fn import(
&mut self,
package: &'a str,
module: &'a [String],
as_name: &'a Option<String>,
unqualified: &'a [UnqualifiedImport],
) -> Document<'a> {
let module_name = as_name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
module
.last()
.gleam_expect("JavaScript code generator could not identify imported module name.")
});
self.register_in_scope(module_name);
let module_name = maybe_escape_identifier(module_name);
let path: Document<'a> = self.import_path(package, module);
let import_line = docvec!["import * as ", module_name.clone(), " from ", path, ";"];
let mut any_unqualified_values = false;
let matches = unqualified
.iter()
.filter(|i| {
// We do not create a JS import for uppercase names are they are
// type or record constructors, both of which are not used at runtime
i.name
.chars()
.next()
.map(char::is_lowercase)
.unwrap_or(false)
})
.map(|i| {
any_unqualified_values = true;
let alias = i.as_name.as_ref().map(|n| {
self.register_in_scope(n);
maybe_escape_identifier(n)
});
(maybe_escape_identifier(&i.name), alias)
});
let matches = wrap_object(matches);
if any_unqualified_values {
docvec![
import_line,
line(),
"const ",
matches,
" = ",
module_name,
";"
]
} else {
import_line
}
}
fn module_constant(
&mut self,
public: bool,
name: &'a str,
value: &'a TypedConstant,
) -> Output<'a> {
let head = if public { "export const " } else { "const " };
self.register_in_scope(name);
Ok(docvec![
head,
maybe_escape_identifier(name),
" = ",
expression::constant_expression(value)?,
";",
])
}
/// Reserves `name` in the module-level scope, with a usage count of zero.
fn register_in_scope(&mut self, name: &str) {
    let _ = self.module_scope.insert(String::from(name), 0);
}
fn module_function(
&mut self,
public: bool,
name: &'a str,
args: &'a [TypedArg],
body: &'a TypedExpr,
) -> Output<'a> {
self.register_in_scope(name);
let argument_names = args
.iter()
.map(|arg| arg.names.get_variable_name())
.collect();
let mut generator = expression::Generator::new(
&self.module.name,
self.line_numbers,
name,
argument_names,
&mut self.float_division_used,
&mut self.object_equality_used,
self.module_scope.clone(),
);
let head = if public {
"export function "
} else {
"function "
};
Ok(docvec![
head,
maybe_escape_identifier(name),
fun_args(args),
" {",
docvec![line(), generator.function_body(body)?]
.nest(INDENT)
.group(),
line(),
"}",
])
}
fn external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
module: &'a str,
fun: &'a str,
) -> Document<'a> |
fn imported_external_function(
&mut self,
public: bool,
name: &'a str,
module: &'a str,
fun: &'a str,
) -> Document<'a> {
let import = if name == fun {
docvec!["import { ", name, r#" } from ""#, module, r#"";"#]
} else {
docvec![
"import { ",
fun,
" as ",
name,
r#" } from ""#,
module,
r#"";"#
]
};
if public {
import
.append(line())
.append("export { ")
.append(name)
.append(" };")
} else {
import
}
}
fn global_external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
fun: &'a str,
) -> Document<'a> {
let head = if public {
"export function "
} else {
"function "
};
let arguments = external_fn_args(arguments);
let body = docvec!["return ", fun, arguments.clone()];
docvec![
head,
name,
arguments,
" {",
docvec![line(), body].nest(INDENT).group(),
line(),
"}",
]
}
}
/// Renders the parameter list of an external function.
///
/// Labelled arguments use their label; unlabelled ones are named `arg0`,
/// `arg1`, ... by position.
fn external_fn_args<T>(arguments: &[ExternalFnArg<T>]) -> Document<'_> {
    wrap_args(arguments.iter().enumerate().map(|(index, argument)| {
        argument
            .label
            .as_ref()
            .map(|label| label.as_str().to_doc())
            .unwrap_or_else(|| Document::String(format!("arg{}", index)))
    }))
}
/// Renders `module` as JavaScript source text into `writer`.
///
/// Compiles the typed module with a [`Generator`], then pretty-prints the
/// resulting document at a maximum line width of 80 columns.
pub fn module(
module: &TypedModule,
line_numbers: &LineNumbers,
writer: &mut impl Utf8Writer,
) -> Result<(), crate::Error> {
Generator::new(line_numbers, module)
.compile()
.map_err(crate::Error::JavaScript)?
.pretty_print(80, writer)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
Unsupported { feature: String },
}
fn unsupported<M: ToString, T>(label: M) -> Result<T, Error> {
Err(Error::Unsupported {
feature: label.to_string(),
})
}
/// Renders a Gleam function's parameter list.
///
/// Discarded parameters render as `_`; named parameters render as their name.
// NOTE(review): multiple discarded parameters all render as `_`; duplicate
// parameter names are illegal under "use strict" — verify upstream prevents this.
fn fun_args(args: &'_ [TypedArg]) -> Document<'_> {
wrap_args(args.iter().map(|a| match &a.names {
ArgNames::Discard {.. } | ArgNames::LabelledDiscard {.. } => "_".to_doc(),
ArgNames::Named { name } | ArgNames::NamedLabelled { name,.. } => name.to_doc(),
}))
}
/// Wraps comma-separated documents in parentheses as a breakable group,
/// indenting the arguments when the group breaks across lines.
fn wrap_args<'a, I>(args: I) -> Document<'a>
where
I: Iterator<Item = Document<'a>>,
{
break_("", "")
.append(concat(Itertools::intersperse(args, break_(",", ", "))))
.nest(INDENT)
.append(break_("", ""))
.surround("(", ")")
.group()
}
/// Renders a JavaScript object literal from `(key, value)` pairs.
///
/// A pair whose value is `None` renders as object shorthand (just the key),
/// otherwise as `key: value`. The group breaks across lines with the fields
/// indented.
fn wrap_object<'a>(
items: impl Iterator<Item = (Document<'a>, Option<Document<'a>>)>,
) -> Document<'a> {
let fields = items.map(|(key, value)| match value {
Some(value) => docvec![key, ": ", value,],
None => key.to_doc(),
});
docvec![
docvec![
"{",
break_("", " "),
concat(Itertools::intersperse(fields, break_(",", ", ")))
]
.nest(INDENT)
.append(break_("", " "))
.group(),
"}"
]
}
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
// We also treat `undefined` as reserved to avoid any unintentional overriding
// which could cause bugs.
fn is_valid_js_identifier(word: &str) -> bool {
    // Every reserved word that generated identifiers must not collide with.
    const RESERVED_WORDS: &[&str] = &[
        "await", "break", "case", "catch", "class", "const", "continue",
        "debugger", "default", "delete", "do", "else", "enum", "export",
        "extends", "false", "finally", "for", "function", "if", "implements",
        "import", "in", "instanceof", "interface", "let", "new", "null",
        "package", "private", "protected", "public", "return", "static",
        "super", "switch", "this", "throw", "true", "try", "typeof",
        "undefined", "var", "void", "while", "with", "yield",
    ];
    !RESERVED_WORDS.contains(&word)
}
/// Converts `word` into a document, escaping it first when it collides with a
/// JavaScript reserved word.
fn maybe_escape_identifier(word: &str) -> Document<'_> {
    match is_valid_js_identifier(word) {
        true => word.to_doc(),
        false => escape_identifier(word),
    }
}
/// Escapes a reserved word by appending `$`, e.g. `class` becomes `class$`.
fn escape_identifier(word: &str) -> Document<'_> {
    let mut escaped = String::with_capacity(word.len() + 1);
    escaped.push_str(word);
    escaped.push('$');
    Document::String(escaped)
}
| {
if module.is_empty() {
self.global_external_function(public, name, arguments, fun)
} else {
self.imported_external_function(public, name, module, fun)
}
} | identifier_body |
javascript.rs | mod expression;
mod pattern;
#[cfg(test)]
mod tests;
use crate::{ast::*, error::GleamExpect, fs::Utf8Writer, line_numbers::LineNumbers, pretty::*};
use itertools::Itertools;
const INDENT: isize = 2;
const DEEP_EQUAL: &str = "
function $equal(x, y) {
let toCheck = [x, y];
while (toCheck) {
let a = toCheck.pop();
let b = toCheck.pop();
if (a === b) return true;
if (!$is_object(a) ||!$is_object(b)) return false;
for (let k of Object.keys(a)) {
toCheck.push(a[k], b[k]);
}
}
return true;
}
function $is_object(object) {
return object!== null && typeof object === 'object';
}";
const FUNCTION_DIVIDE: &str = "
function $divide(a, b) {
if (b === 0) return 0;
return a / b;
}";
pub type Output<'a> = Result<Document<'a>, Error>;
#[derive(Debug)]
pub struct Generator<'a> {
line_numbers: &'a LineNumbers,
module: &'a TypedModule,
float_division_used: bool,
object_equality_used: bool,
module_scope: im::HashMap<String, usize>,
}
impl<'a> Generator<'a> {
pub fn new(line_numbers: &'a LineNumbers, module: &'a TypedModule) -> Self {
Self {
line_numbers,
module,
float_division_used: false,
object_equality_used: false,
module_scope: Default::default(),
}
}
pub fn compile(&mut self) -> Output<'a> {
let statements = std::iter::once(Ok(r#""use strict";"#.to_doc())).chain(
self.module
.statements
.iter()
.flat_map(|s| self.statement(s)),
);
// Two lines between each statement
let statements = Itertools::intersperse(statements, Ok(lines(2)));
let mut statements = statements.collect::<Result<Vec<_>, _>>()?;
// If float division has been used render an appropriate function
if self.float_division_used {
statements.push(FUNCTION_DIVIDE.to_doc());
};
if self.object_equality_used {
statements.push(DEEP_EQUAL.to_doc());
};
statements.push(line());
Ok(statements.to_doc())
}
pub fn statement(&mut self, statement: &'a TypedStatement) -> Option<Output<'a>> {
match statement {
Statement::TypeAlias {.. } => None,
Statement::CustomType {.. } => None,
Statement::Import {
module,
as_name,
unqualified,
package,
..
} => Some(Ok(self.import(package, module, as_name, unqualified))),
Statement::ExternalType {.. } => None,
Statement::ModuleConstant {
public,
name,
value,
..
} => Some(self.module_constant(*public, name, value)),
Statement::Fn {
arguments,
name,
body,
public,
..
} => Some(self.module_function(*public, name, arguments, body)),
Statement::ExternalFn {
public,
name,
arguments,
module,
fun,
..
} => Some(Ok(
self.external_function(*public, name, arguments, module, fun)
)),
}
}
fn import_path(&mut self, package: &'a str, module: &'a [String]) -> Document<'a> {
let path = Document::String(module.join("/"));
if package == self.module.type_info.package {
// Same package uses relative paths
let prefix = match self.module.name.len() {
1 => "./".to_doc(),
_ => Document::String("../".repeat(module.len() - 1)),
};
docvec!["\"", prefix, path, ".js\""]
} else {
// Different packages uses absolute imports
docvec!["\"", package, "/", path, ".js\""]
}
}
fn import(
&mut self,
package: &'a str,
module: &'a [String],
as_name: &'a Option<String>,
unqualified: &'a [UnqualifiedImport],
) -> Document<'a> {
let module_name = as_name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
module
.last()
.gleam_expect("JavaScript code generator could not identify imported module name.")
});
self.register_in_scope(module_name);
let module_name = maybe_escape_identifier(module_name);
let path: Document<'a> = self.import_path(package, module);
let import_line = docvec!["import * as ", module_name.clone(), " from ", path, ";"];
let mut any_unqualified_values = false;
let matches = unqualified
.iter()
.filter(|i| {
// We do not create a JS import for uppercase names are they are
// type or record constructors, both of which are not used at runtime
i.name
.chars()
.next()
.map(char::is_lowercase)
.unwrap_or(false)
})
.map(|i| {
any_unqualified_values = true;
let alias = i.as_name.as_ref().map(|n| {
self.register_in_scope(n);
maybe_escape_identifier(n)
});
(maybe_escape_identifier(&i.name), alias)
});
let matches = wrap_object(matches);
if any_unqualified_values {
docvec![
import_line,
line(),
"const ",
matches,
" = ",
module_name,
";"
]
} else {
import_line
}
}
fn module_constant(
&mut self,
public: bool,
name: &'a str,
value: &'a TypedConstant,
) -> Output<'a> {
let head = if public { "export const " } else { "const " };
self.register_in_scope(name);
Ok(docvec![
head,
maybe_escape_identifier(name),
" = ",
expression::constant_expression(value)?,
";",
])
}
fn register_in_scope(&mut self, name: &str) {
let _ = self.module_scope.insert(name.to_string(), 0);
}
fn module_function(
&mut self,
public: bool,
name: &'a str,
args: &'a [TypedArg],
body: &'a TypedExpr,
) -> Output<'a> {
self.register_in_scope(name);
let argument_names = args
.iter()
.map(|arg| arg.names.get_variable_name())
.collect();
let mut generator = expression::Generator::new(
&self.module.name,
self.line_numbers,
name,
argument_names,
&mut self.float_division_used,
&mut self.object_equality_used,
self.module_scope.clone(),
);
let head = if public {
"export function "
} else {
"function "
};
Ok(docvec![
head,
maybe_escape_identifier(name),
fun_args(args),
" {",
docvec![line(), generator.function_body(body)?]
.nest(INDENT)
.group(),
line(),
"}",
])
}
fn external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
module: &'a str,
fun: &'a str,
) -> Document<'a> {
if module.is_empty() {
self.global_external_function(public, name, arguments, fun)
} else {
self.imported_external_function(public, name, module, fun)
}
}
fn imported_external_function(
&mut self,
public: bool,
name: &'a str,
module: &'a str,
fun: &'a str,
) -> Document<'a> {
let import = if name == fun {
docvec!["import { ", name, r#" } from ""#, module, r#"";"#]
} else {
docvec![
"import { ",
fun,
" as ",
name,
r#" } from ""#,
module,
r#"";"#
]
};
if public {
import
.append(line())
.append("export { ")
.append(name)
.append(" };")
} else {
import
}
}
fn global_external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
fun: &'a str,
) -> Document<'a> {
let head = if public {
"export function "
} else {
"function "
};
let arguments = external_fn_args(arguments);
let body = docvec!["return ", fun, arguments.clone()];
docvec![
head,
name,
arguments,
" {",
docvec![line(), body].nest(INDENT).group(),
line(),
"}",
]
}
}
fn external_fn_args<T>(arguments: &[ExternalFnArg<T>]) -> Document<'_> {
wrap_args(arguments.iter().enumerate().map(|a| {
match a {
(index, ExternalFnArg { label,.. }) => label
.as_ref()
.map(|l| l.as_str().to_doc())
.unwrap_or_else(|| Document::String(format!("arg{}", index))),
}
}))
}
pub fn module(
module: &TypedModule,
line_numbers: &LineNumbers,
writer: &mut impl Utf8Writer,
) -> Result<(), crate::Error> {
Generator::new(line_numbers, module)
.compile()
.map_err(crate::Error::JavaScript)?
.pretty_print(80, writer)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
Unsupported { feature: String },
}
fn unsupported<M: ToString, T>(label: M) -> Result<T, Error> {
Err(Error::Unsupported {
feature: label.to_string(),
})
}
fn fun_args(args: &'_ [TypedArg]) -> Document<'_> {
wrap_args(args.iter().map(|a| match &a.names {
ArgNames::Discard {.. } | ArgNames::LabelledDiscard {.. } => "_".to_doc(),
ArgNames::Named { name } | ArgNames::NamedLabelled { name,.. } => name.to_doc(),
}))
}
fn wrap_args<'a, I>(args: I) -> Document<'a>
where
I: Iterator<Item = Document<'a>>,
{
break_("", "")
.append(concat(Itertools::intersperse(args, break_(",", ", "))))
.nest(INDENT)
.append(break_("", ""))
.surround("(", ")")
.group()
}
fn | <'a>(
items: impl Iterator<Item = (Document<'a>, Option<Document<'a>>)>,
) -> Document<'a> {
let fields = items.map(|(key, value)| match value {
Some(value) => docvec![key, ": ", value,],
None => key.to_doc(),
});
docvec![
docvec![
"{",
break_("", " "),
concat(Itertools::intersperse(fields, break_(",", ", ")))
]
.nest(INDENT)
.append(break_("", " "))
.group(),
"}"
]
}
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
// And we add `undefined` to avoid any unintentional overriding which could
// cause bugs.
fn is_valid_js_identifier(word: &str) -> bool {
!matches!(
word,
"await"
| "break"
| "case"
| "catch"
| "class"
| "const"
| "continue"
| "debugger"
| "default"
| "delete"
| "do"
| "else"
| "enum"
| "export"
| "extends"
| "false"
| "finally"
| "for"
| "function"
| "if"
| "implements"
| "import"
| "in"
| "instanceof"
| "interface"
| "let"
| "new"
| "null"
| "package"
| "private"
| "protected"
| "public"
| "return"
| "static"
| "super"
| "switch"
| "this"
| "throw"
| "true"
| "try"
| "typeof"
| "undefined"
| "var"
| "void"
| "while"
| "with"
| "yield"
)
}
fn maybe_escape_identifier(word: &str) -> Document<'_> {
if is_valid_js_identifier(word) {
word.to_doc()
} else {
escape_identifier(word)
}
}
fn escape_identifier(word: &str) -> Document<'_> {
Document::String(format!("{}$", word))
}
| wrap_object | identifier_name |
javascript.rs | mod expression;
mod pattern;
#[cfg(test)]
mod tests;
use crate::{ast::*, error::GleamExpect, fs::Utf8Writer, line_numbers::LineNumbers, pretty::*};
use itertools::Itertools;
const INDENT: isize = 2;
const DEEP_EQUAL: &str = "
function $equal(x, y) {
let toCheck = [x, y];
while (toCheck) {
let a = toCheck.pop();
let b = toCheck.pop();
if (a === b) return true;
if (!$is_object(a) ||!$is_object(b)) return false;
for (let k of Object.keys(a)) {
toCheck.push(a[k], b[k]);
}
}
return true;
}
function $is_object(object) {
return object!== null && typeof object === 'object';
}";
const FUNCTION_DIVIDE: &str = "
function $divide(a, b) {
if (b === 0) return 0;
return a / b;
}";
pub type Output<'a> = Result<Document<'a>, Error>;
#[derive(Debug)]
pub struct Generator<'a> {
line_numbers: &'a LineNumbers,
module: &'a TypedModule,
float_division_used: bool,
object_equality_used: bool,
module_scope: im::HashMap<String, usize>,
}
impl<'a> Generator<'a> {
pub fn new(line_numbers: &'a LineNumbers, module: &'a TypedModule) -> Self {
Self {
line_numbers,
module,
float_division_used: false,
object_equality_used: false,
module_scope: Default::default(),
}
}
pub fn compile(&mut self) -> Output<'a> {
let statements = std::iter::once(Ok(r#""use strict";"#.to_doc())).chain(
self.module
.statements
.iter()
.flat_map(|s| self.statement(s)),
);
// Two lines between each statement
let statements = Itertools::intersperse(statements, Ok(lines(2)));
let mut statements = statements.collect::<Result<Vec<_>, _>>()?;
// If float division has been used render an appropriate function
if self.float_division_used {
statements.push(FUNCTION_DIVIDE.to_doc());
};
if self.object_equality_used {
statements.push(DEEP_EQUAL.to_doc());
};
statements.push(line());
Ok(statements.to_doc())
}
pub fn statement(&mut self, statement: &'a TypedStatement) -> Option<Output<'a>> {
match statement {
Statement::TypeAlias {.. } => None,
Statement::CustomType {.. } => None,
Statement::Import {
module,
as_name,
unqualified,
package,
..
} => Some(Ok(self.import(package, module, as_name, unqualified))),
Statement::ExternalType {.. } => None,
Statement::ModuleConstant {
public,
name,
value,
..
} => Some(self.module_constant(*public, name, value)),
Statement::Fn {
arguments,
name,
body,
public,
..
} => Some(self.module_function(*public, name, arguments, body)),
Statement::ExternalFn {
public,
name,
arguments,
module,
fun,
..
} => Some(Ok(
self.external_function(*public, name, arguments, module, fun)
)),
}
}
fn import_path(&mut self, package: &'a str, module: &'a [String]) -> Document<'a> {
let path = Document::String(module.join("/"));
if package == self.module.type_info.package {
// Same package uses relative paths
let prefix = match self.module.name.len() {
1 => "./".to_doc(),
_ => Document::String("../".repeat(module.len() - 1)),
};
docvec!["\"", prefix, path, ".js\""]
} else {
// Different packages uses absolute imports
docvec!["\"", package, "/", path, ".js\""]
}
}
fn import(
&mut self,
package: &'a str,
module: &'a [String],
as_name: &'a Option<String>,
unqualified: &'a [UnqualifiedImport],
) -> Document<'a> {
let module_name = as_name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
module
.last()
.gleam_expect("JavaScript code generator could not identify imported module name.")
});
self.register_in_scope(module_name);
let module_name = maybe_escape_identifier(module_name);
let path: Document<'a> = self.import_path(package, module);
let import_line = docvec!["import * as ", module_name.clone(), " from ", path, ";"];
let mut any_unqualified_values = false;
let matches = unqualified
.iter()
.filter(|i| {
// We do not create a JS import for uppercase names are they are
// type or record constructors, both of which are not used at runtime
i.name
.chars()
.next()
.map(char::is_lowercase)
.unwrap_or(false)
})
.map(|i| {
any_unqualified_values = true;
let alias = i.as_name.as_ref().map(|n| {
self.register_in_scope(n);
maybe_escape_identifier(n)
});
(maybe_escape_identifier(&i.name), alias)
});
let matches = wrap_object(matches);
if any_unqualified_values {
docvec![
import_line,
line(),
"const ",
matches,
" = ",
module_name,
";"
]
} else {
import_line
}
}
fn module_constant(
&mut self,
public: bool,
name: &'a str,
value: &'a TypedConstant,
) -> Output<'a> {
let head = if public { "export const " } else { "const " };
self.register_in_scope(name);
Ok(docvec![
head,
maybe_escape_identifier(name),
" = ",
expression::constant_expression(value)?,
";",
])
}
fn register_in_scope(&mut self, name: &str) {
let _ = self.module_scope.insert(name.to_string(), 0);
}
fn module_function(
&mut self,
public: bool,
name: &'a str,
args: &'a [TypedArg],
body: &'a TypedExpr,
) -> Output<'a> {
self.register_in_scope(name);
let argument_names = args
.iter()
.map(|arg| arg.names.get_variable_name())
.collect();
let mut generator = expression::Generator::new(
&self.module.name,
self.line_numbers,
name,
argument_names,
&mut self.float_division_used, | "export function "
} else {
"function "
};
Ok(docvec![
head,
maybe_escape_identifier(name),
fun_args(args),
" {",
docvec![line(), generator.function_body(body)?]
.nest(INDENT)
.group(),
line(),
"}",
])
}
fn external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
module: &'a str,
fun: &'a str,
) -> Document<'a> {
if module.is_empty() {
self.global_external_function(public, name, arguments, fun)
} else {
self.imported_external_function(public, name, module, fun)
}
}
fn imported_external_function(
&mut self,
public: bool,
name: &'a str,
module: &'a str,
fun: &'a str,
) -> Document<'a> {
let import = if name == fun {
docvec!["import { ", name, r#" } from ""#, module, r#"";"#]
} else {
docvec![
"import { ",
fun,
" as ",
name,
r#" } from ""#,
module,
r#"";"#
]
};
if public {
import
.append(line())
.append("export { ")
.append(name)
.append(" };")
} else {
import
}
}
fn global_external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
fun: &'a str,
) -> Document<'a> {
let head = if public {
"export function "
} else {
"function "
};
let arguments = external_fn_args(arguments);
let body = docvec!["return ", fun, arguments.clone()];
docvec![
head,
name,
arguments,
" {",
docvec![line(), body].nest(INDENT).group(),
line(),
"}",
]
}
}
fn external_fn_args<T>(arguments: &[ExternalFnArg<T>]) -> Document<'_> {
wrap_args(arguments.iter().enumerate().map(|a| {
match a {
(index, ExternalFnArg { label,.. }) => label
.as_ref()
.map(|l| l.as_str().to_doc())
.unwrap_or_else(|| Document::String(format!("arg{}", index))),
}
}))
}
pub fn module(
module: &TypedModule,
line_numbers: &LineNumbers,
writer: &mut impl Utf8Writer,
) -> Result<(), crate::Error> {
Generator::new(line_numbers, module)
.compile()
.map_err(crate::Error::JavaScript)?
.pretty_print(80, writer)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
Unsupported { feature: String },
}
fn unsupported<M: ToString, T>(label: M) -> Result<T, Error> {
Err(Error::Unsupported {
feature: label.to_string(),
})
}
fn fun_args(args: &'_ [TypedArg]) -> Document<'_> {
wrap_args(args.iter().map(|a| match &a.names {
ArgNames::Discard {.. } | ArgNames::LabelledDiscard {.. } => "_".to_doc(),
ArgNames::Named { name } | ArgNames::NamedLabelled { name,.. } => name.to_doc(),
}))
}
fn wrap_args<'a, I>(args: I) -> Document<'a>
where
I: Iterator<Item = Document<'a>>,
{
break_("", "")
.append(concat(Itertools::intersperse(args, break_(",", ", "))))
.nest(INDENT)
.append(break_("", ""))
.surround("(", ")")
.group()
}
fn wrap_object<'a>(
items: impl Iterator<Item = (Document<'a>, Option<Document<'a>>)>,
) -> Document<'a> {
let fields = items.map(|(key, value)| match value {
Some(value) => docvec![key, ": ", value,],
None => key.to_doc(),
});
docvec![
docvec![
"{",
break_("", " "),
concat(Itertools::intersperse(fields, break_(",", ", ")))
]
.nest(INDENT)
.append(break_("", " "))
.group(),
"}"
]
}
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
// And we add `undefined` to avoid any unintentional overriding which could
// cause bugs.
fn is_valid_js_identifier(word: &str) -> bool {
!matches!(
word,
"await"
| "break"
| "case"
| "catch"
| "class"
| "const"
| "continue"
| "debugger"
| "default"
| "delete"
| "do"
| "else"
| "enum"
| "export"
| "extends"
| "false"
| "finally"
| "for"
| "function"
| "if"
| "implements"
| "import"
| "in"
| "instanceof"
| "interface"
| "let"
| "new"
| "null"
| "package"
| "private"
| "protected"
| "public"
| "return"
| "static"
| "super"
| "switch"
| "this"
| "throw"
| "true"
| "try"
| "typeof"
| "undefined"
| "var"
| "void"
| "while"
| "with"
| "yield"
)
}
fn maybe_escape_identifier(word: &str) -> Document<'_> {
if is_valid_js_identifier(word) {
word.to_doc()
} else {
escape_identifier(word)
}
}
fn escape_identifier(word: &str) -> Document<'_> {
Document::String(format!("{}$", word))
} | &mut self.object_equality_used,
self.module_scope.clone(),
);
let head = if public { | random_line_split |
benchmarks.rs | use std::time::{Instant, Duration};
use ordered_float::OrderedFloat;
use rand::seq::SliceRandom;
use crate::actions::*;
use crate::simulation::*;
use crate::simulation_state::*;
use crate::start_and_strategy_ai::{Strategy, FastStrategy, CombatResult, play_out, collect_starting_points};
use crate::neural_net_ai::NeuralStrategy;
pub trait StrategyOptimizer {
type Strategy: Strategy;
fn step (&mut self, state: & CombatState);
fn report (&self)->& Self::Strategy;
}
struct CandidateStrategy <T> {
strategy: T,
playouts: usize,
total_score: f64,
}
fn playout_result(state: & CombatState, strategy: & impl Strategy)->CombatResult {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// not the best average score, but the most-explored, which comes out to best average score at last sorting among strategies that are at the max playouts
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let optimization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random()); | let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
FastStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
let mut improved = //candidates.choose (&mut thread_rng).clone();
candidates.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1.strategy.clone();
for _ in 0..30 {
improved.do_training_playout(& ghost_state);
}
improved
}
});
let mut neural_mutating: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
candidates.choose (&mut rand::thread_rng()).unwrap().strategy.mutated()
}
});
for _ in 0..20 {
benchmark_step("Hexaghost (FastStrategy, random)", & ghost_state, &mut fast_random);
benchmark_step("Hexaghost (FastStrategy, genetic)", & ghost_state, &mut fast_genetic);
benchmark_step("Hexaghost (NeuralStrategy, random only)", & ghost_state, &mut neural_random_only);
//benchmark_step("Hexaghost (NeuralStrategy, training only)", & ghost_state, &mut neural_training_only);
//benchmark_step("Hexaghost (NeuralStrategy, random/training)", & ghost_state, &mut neural_random_training);
benchmark_step("Hexaghost (NeuralStrategy, mutating)", & ghost_state, &mut neural_mutating);
println!();
}
} | random_line_split |
|
benchmarks.rs | use std::time::{Instant, Duration};
use ordered_float::OrderedFloat;
use rand::seq::SliceRandom;
use crate::actions::*;
use crate::simulation::*;
use crate::simulation_state::*;
use crate::start_and_strategy_ai::{Strategy, FastStrategy, CombatResult, play_out, collect_starting_points};
use crate::neural_net_ai::NeuralStrategy;
pub trait StrategyOptimizer {
type Strategy: Strategy;
fn step (&mut self, state: & CombatState);
fn report (&self)->& Self::Strategy;
}
struct | <T> {
strategy: T,
playouts: usize,
total_score: f64,
}
fn playout_result(state: & CombatState, strategy: & impl Strategy)->CombatResult {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// not the best average score, but the most-explored, which comes out to best average score at last sorting among strategies that are at the max playouts
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let optimization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random());
let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
FastStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
let mut improved = //candidates.choose (&mut thread_rng).clone();
candidates.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1.strategy.clone();
for _ in 0..30 {
improved.do_training_playout(& ghost_state);
}
improved
}
});
let mut neural_mutating: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
candidates.choose (&mut rand::thread_rng()).unwrap().strategy.mutated()
}
});
for _ in 0..20 {
benchmark_step("Hexaghost (FastStrategy, random)", & ghost_state, &mut fast_random);
benchmark_step("Hexaghost (FastStrategy, genetic)", & ghost_state, &mut fast_genetic);
benchmark_step("Hexaghost (NeuralStrategy, random only)", & ghost_state, &mut neural_random_only);
//benchmark_step("Hexaghost (NeuralStrategy, training only)", & ghost_state, &mut neural_training_only);
//benchmark_step("Hexaghost (NeuralStrategy, random/training)", & ghost_state, &mut neural_random_training);
benchmark_step("Hexaghost (NeuralStrategy, mutating)", & ghost_state, &mut neural_mutating);
println!();
}
}
| CandidateStrategy | identifier_name |
benchmarks.rs | use std::time::{Instant, Duration};
use ordered_float::OrderedFloat;
use rand::seq::SliceRandom;
use crate::actions::*;
use crate::simulation::*;
use crate::simulation_state::*;
use crate::start_and_strategy_ai::{Strategy, FastStrategy, CombatResult, play_out, collect_starting_points};
use crate::neural_net_ai::NeuralStrategy;
pub trait StrategyOptimizer {
type Strategy: Strategy;
fn step (&mut self, state: & CombatState);
fn report (&self)->& Self::Strategy;
}
struct CandidateStrategy <T> {
strategy: T,
playouts: usize,
total_score: f64,
}
fn playout_result(state: & CombatState, strategy: & impl Strategy)->CombatResult {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// not the best average score, but the most-explored, which comes out to best average score at last sorting among strategies that are at the max playouts
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let optimization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random());
let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
Fa | let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
let mut improved = //candidates.choose (&mut thread_rng).clone();
candidates.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1.strategy.clone();
for _ in 0..30 {
improved.do_training_playout(& ghost_state);
}
improved
}
});
let mut neural_mutating: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
candidates.choose (&mut rand::thread_rng()).unwrap().strategy.mutated()
}
});
for _ in 0..20 {
benchmark_step("Hexaghost (FastStrategy, random)", & ghost_state, &mut fast_random);
benchmark_step("Hexaghost (FastStrategy, genetic)", & ghost_state, &mut fast_genetic);
benchmark_step("Hexaghost (NeuralStrategy, random only)", & ghost_state, &mut neural_random_only);
//benchmark_step("Hexaghost (NeuralStrategy, training only)", & ghost_state, &mut neural_training_only);
//benchmark_step("Hexaghost (NeuralStrategy, random/training)", & ghost_state, &mut neural_random_training);
benchmark_step("Hexaghost (NeuralStrategy, mutating)", & ghost_state, &mut neural_mutating);
println!();
}
}
| stStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
| conditional_block |
benchmarks.rs | use std::time::{Instant, Duration};
use ordered_float::OrderedFloat;
use rand::seq::SliceRandom;
use crate::actions::*;
use crate::simulation::*;
use crate::simulation_state::*;
use crate::start_and_strategy_ai::{Strategy, FastStrategy, CombatResult, play_out, collect_starting_points};
use crate::neural_net_ai::NeuralStrategy;
pub trait StrategyOptimizer {
type Strategy: Strategy;
fn step (&mut self, state: & CombatState);
fn report (&self)->& Self::Strategy;
}
struct CandidateStrategy <T> {
strategy: T,
playouts: usize,
total_score: f64,
}
fn playout_result(state: & CombatState, strategy: & impl Strategy)->CombatResult {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// not the best average score, but the most-explored, which comes out to best average score at last sorting among strategies that are at the max playouts
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let op | NeuralStrategy::new_random(&ghost_state, 16)
}
else {
let mut improved = //candidates.choose (&mut thread_rng).clone();
candidates.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1.strategy.clone();
for _ in 0..30 {
improved.do_training_playout(& ghost_state);
}
improved
}
});
let mut neural_mutating: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 {
NeuralStrategy::new_random(&ghost_state, 16)
}
else {
candidates.choose (&mut rand::thread_rng()).unwrap().strategy.mutated()
}
});
for _ in 0..20 {
benchmark_step("Hexaghost (FastStrategy, random)", & ghost_state, &mut fast_random);
benchmark_step("Hexaghost (FastStrategy, genetic)", & ghost_state, &mut fast_genetic);
benchmark_step("Hexaghost (NeuralStrategy, random only)", & ghost_state, &mut neural_random_only);
//benchmark_step("Hexaghost (NeuralStrategy, training only)", & ghost_state, &mut neural_training_only);
//benchmark_step("Hexaghost (NeuralStrategy, random/training)", & ghost_state, &mut neural_random_training);
benchmark_step("Hexaghost (NeuralStrategy, mutating)", & ghost_state, &mut neural_mutating);
println!();
}
}
| timization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random());
let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
FastStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 { | identifier_body |
klogd.rs | use crate::libbb::ptr_to_globals::bb_errno;
use libc;
use libc::openlog;
use libc::syslog;
extern "C" {
#[no_mangle]
fn strtoul(
__nptr: *const libc::c_char,
__endptr: *mut *mut libc::c_char,
__base: libc::c_int,
) -> libc::c_ulong;
#[no_mangle]
fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t;
#[no_mangle]
fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char;
#[no_mangle]
fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int;
#[no_mangle]
fn bb_signals_recursive_norestart(
sigs: libc::c_int,
f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>,
);
#[no_mangle]
fn kill_myself_with_sig(sig: libc::c_int) ->!;
#[no_mangle]
static mut bb_got_signal: smallint;
#[no_mangle]
fn record_signo(signo: libc::c_int);
#[no_mangle]
fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char);
#[no_mangle]
fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint;
#[no_mangle]
fn bb_daemonize_or_rexec(flags: libc::c_int);
#[no_mangle]
fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _:...) -> u32;
#[no_mangle]
fn write_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
fn remove_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
static mut logmode: smallint;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
}
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn | (
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int!= 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG_KERN), treat it as LOG_USER".
* Whereas it was meant to say "if *syslog* is called with facility
* 0 in its 1st parameter without prior call to openlog, then perform
* implicit openlog(LOG_USER)".
*
* As a result of this, eh, feature, standard klogd was forced
* to open-code its own openlog and syslog implementation (!).
*
* Note that prohibiting openlog(LOG_KERN) on libc level does not
* add any security: any process can open a socket to "/dev/log"
* and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message"
*
* Google code search tells me there is no widespread use of
* openlog("foo", 0, 0), thus fixing glibc won't break userspace.
*
* The bug against glibc was filed:
* bugzilla.redhat.com/show_bug.cgi?id=547000
*/
if i!= 0 {
klogd_setloglevel(i);
}
signal(
1i32,
::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t),
);
/* We want klogd_read to not be restarted, thus _norestart: */
bb_signals_recursive_norestart(
BB_FATAL_SIGS as libc::c_int,
Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()),
);
syslog(
5i32,
b"klogd started: %s\x00" as *const u8 as *const libc::c_char,
bb_banner.as_ptr(),
);
write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
used = 0i32;
while bb_got_signal == 0 {
let mut n: libc::c_int = 0;
let mut priority: libc::c_int = 0;
let mut start: *mut libc::c_char = 0 as *mut libc::c_char;
start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize);
n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used);
if n < 0i32 {
if *bb_errno == 4i32 {
continue;
}
bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char);
break;
} else {
*start.offset(n as isize) = '\u{0}' as i32 as libc::c_char;
/* Process each newline-terminated line in the buffer */
start = bb_common_bufsiz1.as_mut_ptr();
loop {
let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32);
if *newline as libc::c_int == '\u{0}' as i32 {
/* This line is incomplete */
/* move it to the front of the buffer */
overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start);
used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int;
if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 {
break;
}
/* buffer is full, log it anyway */
used = 0i32;
newline = 0 as *mut libc::c_char
} else {
let fresh0 = newline;
newline = newline.offset(1);
*fresh0 = '\u{0}' as i32 as libc::c_char
}
/* Extract the priority */
priority = 6i32;
if *start as libc::c_int == '<' as i32 {
start = start.offset(1);
if *start!= 0 {
let mut end: *mut libc::c_char = 0 as *mut libc::c_char;
priority = strtoul(start, &mut end, 10i32) as libc::c_int;
if *end as libc::c_int == '>' as i32 {
end = end.offset(1)
}
start = end
}
}
/* Log (only non-empty lines) */
if *start!= 0 {
syslog(
priority,
b"%s\x00" as *const u8 as *const libc::c_char,
start,
);
}
if newline.is_null() {
break;
}
start = newline
}
}
}
klogd_close();
syslog(
5i32,
b"klogd: exiting\x00" as *const u8 as *const libc::c_char,
);
remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
if bb_got_signal!= 0 {
kill_myself_with_sig(bb_got_signal as libc::c_int);
}
return 1i32;
}
| klogd_main | identifier_name |
klogd.rs | use crate::libbb::ptr_to_globals::bb_errno;
use libc;
use libc::openlog;
use libc::syslog;
extern "C" {
#[no_mangle]
fn strtoul(
__nptr: *const libc::c_char,
__endptr: *mut *mut libc::c_char,
__base: libc::c_int,
) -> libc::c_ulong;
#[no_mangle]
fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t;
#[no_mangle]
fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char;
#[no_mangle]
fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int;
#[no_mangle]
fn bb_signals_recursive_norestart(
sigs: libc::c_int,
f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>,
);
#[no_mangle]
fn kill_myself_with_sig(sig: libc::c_int) ->!;
#[no_mangle]
static mut bb_got_signal: smallint;
#[no_mangle]
fn record_signo(signo: libc::c_int);
#[no_mangle]
fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char);
#[no_mangle]
fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint;
#[no_mangle]
fn bb_daemonize_or_rexec(flags: libc::c_int);
#[no_mangle] | fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _:...) -> u32;
#[no_mangle]
fn write_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
fn remove_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
static mut logmode: smallint;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
}
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn klogd_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int!= 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG_KERN), treat it as LOG_USER".
* Whereas it was meant to say "if *syslog* is called with facility
* 0 in its 1st parameter without prior call to openlog, then perform
* implicit openlog(LOG_USER)".
*
* As a result of this, eh, feature, standard klogd was forced
* to open-code its own openlog and syslog implementation (!).
*
* Note that prohibiting openlog(LOG_KERN) on libc level does not
* add any security: any process can open a socket to "/dev/log"
* and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message"
*
* Google code search tells me there is no widespread use of
* openlog("foo", 0, 0), thus fixing glibc won't break userspace.
*
* The bug against glibc was filed:
* bugzilla.redhat.com/show_bug.cgi?id=547000
*/
if i!= 0 {
klogd_setloglevel(i);
}
signal(
1i32,
::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t),
);
/* We want klogd_read to not be restarted, thus _norestart: */
bb_signals_recursive_norestart(
BB_FATAL_SIGS as libc::c_int,
Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()),
);
syslog(
5i32,
b"klogd started: %s\x00" as *const u8 as *const libc::c_char,
bb_banner.as_ptr(),
);
write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
used = 0i32;
while bb_got_signal == 0 {
let mut n: libc::c_int = 0;
let mut priority: libc::c_int = 0;
let mut start: *mut libc::c_char = 0 as *mut libc::c_char;
start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize);
n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used);
if n < 0i32 {
if *bb_errno == 4i32 {
continue;
}
bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char);
break;
} else {
*start.offset(n as isize) = '\u{0}' as i32 as libc::c_char;
/* Process each newline-terminated line in the buffer */
start = bb_common_bufsiz1.as_mut_ptr();
loop {
let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32);
if *newline as libc::c_int == '\u{0}' as i32 {
/* This line is incomplete */
/* move it to the front of the buffer */
overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start);
used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int;
if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 {
break;
}
/* buffer is full, log it anyway */
used = 0i32;
newline = 0 as *mut libc::c_char
} else {
let fresh0 = newline;
newline = newline.offset(1);
*fresh0 = '\u{0}' as i32 as libc::c_char
}
/* Extract the priority */
priority = 6i32;
if *start as libc::c_int == '<' as i32 {
start = start.offset(1);
if *start!= 0 {
let mut end: *mut libc::c_char = 0 as *mut libc::c_char;
priority = strtoul(start, &mut end, 10i32) as libc::c_int;
if *end as libc::c_int == '>' as i32 {
end = end.offset(1)
}
start = end
}
}
/* Log (only non-empty lines) */
if *start!= 0 {
syslog(
priority,
b"%s\x00" as *const u8 as *const libc::c_char,
start,
);
}
if newline.is_null() {
break;
}
start = newline
}
}
}
klogd_close();
syslog(
5i32,
b"klogd: exiting\x00" as *const u8 as *const libc::c_char,
);
remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
if bb_got_signal!= 0 {
kill_myself_with_sig(bb_got_signal as libc::c_int);
}
return 1i32;
} | random_line_split |
|
klogd.rs | use crate::libbb::ptr_to_globals::bb_errno;
use libc;
use libc::openlog;
use libc::syslog;
extern "C" {
#[no_mangle]
fn strtoul(
__nptr: *const libc::c_char,
__endptr: *mut *mut libc::c_char,
__base: libc::c_int,
) -> libc::c_ulong;
#[no_mangle]
fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t;
#[no_mangle]
fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char;
#[no_mangle]
fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int;
#[no_mangle]
fn bb_signals_recursive_norestart(
sigs: libc::c_int,
f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>,
);
#[no_mangle]
fn kill_myself_with_sig(sig: libc::c_int) ->!;
#[no_mangle]
static mut bb_got_signal: smallint;
#[no_mangle]
fn record_signo(signo: libc::c_int);
#[no_mangle]
fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char);
#[no_mangle]
fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint;
#[no_mangle]
fn bb_daemonize_or_rexec(flags: libc::c_int);
#[no_mangle]
fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _:...) -> u32;
#[no_mangle]
fn write_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
fn remove_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
static mut logmode: smallint;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int |
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn klogd_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int!= 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG_KERN), treat it as LOG_USER".
* Whereas it was meant to say "if *syslog* is called with facility
* 0 in its 1st parameter without prior call to openlog, then perform
* implicit openlog(LOG_USER)".
*
* As a result of this, eh, feature, standard klogd was forced
* to open-code its own openlog and syslog implementation (!).
*
* Note that prohibiting openlog(LOG_KERN) on libc level does not
* add any security: any process can open a socket to "/dev/log"
* and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message"
*
* Google code search tells me there is no widespread use of
* openlog("foo", 0, 0), thus fixing glibc won't break userspace.
*
* The bug against glibc was filed:
* bugzilla.redhat.com/show_bug.cgi?id=547000
*/
if i!= 0 {
klogd_setloglevel(i);
}
signal(
1i32,
::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t),
);
/* We want klogd_read to not be restarted, thus _norestart: */
bb_signals_recursive_norestart(
BB_FATAL_SIGS as libc::c_int,
Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()),
);
syslog(
5i32,
b"klogd started: %s\x00" as *const u8 as *const libc::c_char,
bb_banner.as_ptr(),
);
write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
used = 0i32;
while bb_got_signal == 0 {
let mut n: libc::c_int = 0;
let mut priority: libc::c_int = 0;
let mut start: *mut libc::c_char = 0 as *mut libc::c_char;
start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize);
n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used);
if n < 0i32 {
if *bb_errno == 4i32 {
continue;
}
bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char);
break;
} else {
*start.offset(n as isize) = '\u{0}' as i32 as libc::c_char;
/* Process each newline-terminated line in the buffer */
start = bb_common_bufsiz1.as_mut_ptr();
loop {
let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32);
if *newline as libc::c_int == '\u{0}' as i32 {
/* This line is incomplete */
/* move it to the front of the buffer */
overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start);
used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int;
if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 {
break;
}
/* buffer is full, log it anyway */
used = 0i32;
newline = 0 as *mut libc::c_char
} else {
let fresh0 = newline;
newline = newline.offset(1);
*fresh0 = '\u{0}' as i32 as libc::c_char
}
/* Extract the priority */
priority = 6i32;
if *start as libc::c_int == '<' as i32 {
start = start.offset(1);
if *start!= 0 {
let mut end: *mut libc::c_char = 0 as *mut libc::c_char;
priority = strtoul(start, &mut end, 10i32) as libc::c_int;
if *end as libc::c_int == '>' as i32 {
end = end.offset(1)
}
start = end
}
}
/* Log (only non-empty lines) */
if *start!= 0 {
syslog(
priority,
b"%s\x00" as *const u8 as *const libc::c_char,
start,
);
}
if newline.is_null() {
break;
}
start = newline
}
}
}
klogd_close();
syslog(
5i32,
b"klogd: exiting\x00" as *const u8 as *const libc::c_char,
);
remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
if bb_got_signal!= 0 {
kill_myself_with_sig(bb_got_signal as libc::c_int);
}
return 1i32;
}
| {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
} | identifier_body |
klogd.rs | use crate::libbb::ptr_to_globals::bb_errno;
use libc;
use libc::openlog;
use libc::syslog;
extern "C" {
#[no_mangle]
fn strtoul(
__nptr: *const libc::c_char,
__endptr: *mut *mut libc::c_char,
__base: libc::c_int,
) -> libc::c_ulong;
#[no_mangle]
fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t;
#[no_mangle]
fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char;
#[no_mangle]
fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int;
#[no_mangle]
fn bb_signals_recursive_norestart(
sigs: libc::c_int,
f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>,
);
#[no_mangle]
fn kill_myself_with_sig(sig: libc::c_int) ->!;
#[no_mangle]
static mut bb_got_signal: smallint;
#[no_mangle]
fn record_signo(signo: libc::c_int);
#[no_mangle]
fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char);
#[no_mangle]
fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint;
#[no_mangle]
fn bb_daemonize_or_rexec(flags: libc::c_int);
#[no_mangle]
fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _:...) -> u32;
#[no_mangle]
fn write_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
fn remove_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
static mut logmode: smallint;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
}
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn klogd_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int!= 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG_KERN), treat it as LOG_USER".
* Whereas it was meant to say "if *syslog* is called with facility
* 0 in its 1st parameter without prior call to openlog, then perform
* implicit openlog(LOG_USER)".
*
* As a result of this, eh, feature, standard klogd was forced
* to open-code its own openlog and syslog implementation (!).
*
* Note that prohibiting openlog(LOG_KERN) on libc level does not
* add any security: any process can open a socket to "/dev/log"
* and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message"
*
* Google code search tells me there is no widespread use of
* openlog("foo", 0, 0), thus fixing glibc won't break userspace.
*
* The bug against glibc was filed:
* bugzilla.redhat.com/show_bug.cgi?id=547000
*/
if i!= 0 {
klogd_setloglevel(i);
}
signal(
1i32,
::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t),
);
/* We want klogd_read to not be restarted, thus _norestart: */
bb_signals_recursive_norestart(
BB_FATAL_SIGS as libc::c_int,
Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()),
);
syslog(
5i32,
b"klogd started: %s\x00" as *const u8 as *const libc::c_char,
bb_banner.as_ptr(),
);
write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
used = 0i32;
while bb_got_signal == 0 {
let mut n: libc::c_int = 0;
let mut priority: libc::c_int = 0;
let mut start: *mut libc::c_char = 0 as *mut libc::c_char;
start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize);
n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used);
if n < 0i32 {
if *bb_errno == 4i32 {
continue;
}
bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char);
break;
} else | *fresh0 = '\u{0}' as i32 as libc::c_char
}
/* Extract the priority */
priority = 6i32;
if *start as libc::c_int == '<' as i32 {
start = start.offset(1);
if *start!= 0 {
let mut end: *mut libc::c_char = 0 as *mut libc::c_char;
priority = strtoul(start, &mut end, 10i32) as libc::c_int;
if *end as libc::c_int == '>' as i32 {
end = end.offset(1)
}
start = end
}
}
/* Log (only non-empty lines) */
if *start!= 0 {
syslog(
priority,
b"%s\x00" as *const u8 as *const libc::c_char,
start,
);
}
if newline.is_null() {
break;
}
start = newline
}
}
}
klogd_close();
syslog(
5i32,
b"klogd: exiting\x00" as *const u8 as *const libc::c_char,
);
remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
if bb_got_signal!= 0 {
kill_myself_with_sig(bb_got_signal as libc::c_int);
}
return 1i32;
}
| {
*start.offset(n as isize) = '\u{0}' as i32 as libc::c_char;
/* Process each newline-terminated line in the buffer */
start = bb_common_bufsiz1.as_mut_ptr();
loop {
let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32);
if *newline as libc::c_int == '\u{0}' as i32 {
/* This line is incomplete */
/* move it to the front of the buffer */
overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start);
used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int;
if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 {
break;
}
/* buffer is full, log it anyway */
used = 0i32;
newline = 0 as *mut libc::c_char
} else {
let fresh0 = newline;
newline = newline.offset(1); | conditional_block |
protocol_adapter.rs | use crate::HandlerError;
use bigdecimal::{BigDecimal, FromPrimitive};
use graphql_parser::query::{
Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value,
};
use query_core::query_document::*;
/// Protocol adapter for GraphQL -> Query Document.
///
/// GraphQL is mapped as following:
/// - Every field of a `query {... }` or single selection block `{... }` is mapped to an `Operation::Read`.
/// - Every field of a single `mutation {... }` is mapped to an `Operation::Write`.
/// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored.
/// - Fields on the queries are mapped to `Field`s, including arguments.
/// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s.
///
/// Currently unsupported features:
/// - Fragments in any form.
/// - Variables.
/// - Subscription queries.
/// - Query names are ignored
pub struct GraphQLProtocolAdapter;
impl GraphQLProtocolAdapter {
pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> {
let gql_doc = match graphql_parser::parse_query(query) {
Ok(doc) => doc,
Err(err)
if err.to_string().contains("number too large to fit in target type")
| err.to_string().contains("number too small to fit in target type") =>
{
return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned()));
}
err @ Err(_) => err?,
};
Self::convert(gql_doc, operation_name)
}
pub fn convert(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> {
let mut operations: Vec<Operation> = match operation {
Some(ref op) => gql_doc
.definitions
.into_iter()
.find(|def| Self::matches_operation(def, op))
.ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query.")))
.and_then(Self::convert_definition),
None => gql_doc
.definitions
.into_iter()
.map(Self::convert_definition)
.collect::<crate::Result<Vec<Vec<Operation>>>>()
.map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()),
}?;
let operation = operations
.pop()
.ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))?
.dedup_selections();
Ok(operation)
}
fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> |
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect())
}
fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect())
}
fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> {
selection_set
.items
.into_iter()
.map(|item| match item {
GqlSelection::Field(f) => {
let arguments: Vec<(String, ArgumentValue)> = f
.arguments
.into_iter()
.map(|(k, v)| Ok((k, Self::convert_value(v)?)))
.collect::<crate::Result<Vec<_>>>()?;
let nested_selections = Self::convert_selection_set(f.selection_set)?;
Ok(Selection::new(f.name, f.alias, arguments, nested_selections))
}
GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature(
"Fragment spread",
format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position),
)),
GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature(
"Inline fragment",
format!("At position {}.", i.position),
)),
})
.collect()
}
/// Checks if the given GraphQL definition matches the operation name that should be executed.
fn matches_operation(def: &Definition<String>, operation: &str) -> bool {
let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some();
match def {
Definition::Fragment(_) => false,
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => check(s.name.as_ref()),
OperationDefinition::SelectionSet(_) => false,
OperationDefinition::Query(q) => check(q.name.as_ref()),
OperationDefinition::Mutation(m) => check(m.name.as_ref()),
},
}
}
fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> {
match value {
Value::Variable(name) => Err(HandlerError::unsupported_feature(
"Variable usage",
format!("Variable '{name}'."),
)),
Value::Int(i) => match i.as_i64() {
Some(i) => Ok(ArgumentValue::int(i)),
None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))),
},
Value::Float(f) => match BigDecimal::from_f64(f) {
Some(dec) => Ok(ArgumentValue::float(dec)),
None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))),
},
Value::String(s) => Ok(ArgumentValue::string(s)),
Value::Boolean(b) => Ok(ArgumentValue::bool(b)),
Value::Null => Ok(ArgumentValue::null()),
Value::Enum(e) => Ok(ArgumentValue::r#enum(e)),
Value::List(values) => {
let values: Vec<ArgumentValue> = values
.into_iter()
.map(Self::convert_value)
.collect::<crate::Result<Vec<ArgumentValue>>>()?;
Ok(ArgumentValue::list(values))
}
Value::Object(map) => {
let values = map
.into_iter()
.map(|(k, v)| Self::convert_value(v).map(|v| (k, v)))
.collect::<crate::Result<ArgumentValueObject>>()?;
Ok(ArgumentValue::object(values))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn converts_single_query() {
let query = r#"
query findTheModelOperation {
findOneModel(where: {a_number: {gte: 1}}) {
id,
large_number,
other {
name
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "findOneModel");
assert!(matches!(operation, Operation::Read(_)));
let read = operation.into_read().unwrap();
let where_args = ArgumentValue::object([(
"a_number".to_string(),
ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]),
)]);
assert_eq!(read.arguments(), [("where".to_string(), where_args)]);
let selections = Vec::from([
Selection::new("id", None, [], Vec::new()),
Selection::new("large_number", None, [], Vec::new()),
Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])),
]);
assert_eq!(read.nested_selections(), selections);
}
#[test]
fn converts_single_mutation() {
let query = r#"
mutation {
createOnePost(data: {
id: 1,
categories: {create: [{id: 1}, {id: 2}]}
}) {
id,
categories {
id
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "createOnePost");
assert!(matches!(operation, Operation::Write(_)));
let write = operation.into_write().unwrap();
let data_args = ArgumentValue::object([
("id".to_string(), ArgumentValue::int(1)),
(
"categories".to_string(),
ArgumentValue::object([(
"create".to_string(),
ArgumentValue::list([
ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]),
ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]),
]),
)]),
),
]);
println!("args {:?}", write.arguments());
assert_eq!(write.arguments(), [("data".to_string(), data_args)]);
}
}
| {
match def {
Definition::Fragment(f) => Err(HandlerError::unsupported_feature(
"Fragment definition",
format!("Fragment '{}', at position {}.", f.name, f.position),
)),
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature(
"Subscription query",
format!("At position {}.", s.position),
)),
OperationDefinition::SelectionSet(s) => Self::convert_query(s),
OperationDefinition::Query(q) => Self::convert_query(q.selection_set),
OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set),
},
}
} | identifier_body |
protocol_adapter.rs | use crate::HandlerError;
use bigdecimal::{BigDecimal, FromPrimitive};
use graphql_parser::query::{
Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value,
};
use query_core::query_document::*;
/// Protocol adapter for GraphQL -> Query Document.
///
/// GraphQL is mapped as following:
/// - Every field of a `query {... }` or single selection block `{... }` is mapped to an `Operation::Read`.
/// - Every field of a single `mutation {... }` is mapped to an `Operation::Write`.
/// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored.
/// - Fields on the queries are mapped to `Field`s, including arguments.
/// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s.
///
/// Currently unsupported features:
/// - Fragments in any form.
/// - Variables.
/// - Subscription queries.
/// - Query names are ignored
pub struct GraphQLProtocolAdapter;
impl GraphQLProtocolAdapter {
pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> {
let gql_doc = match graphql_parser::parse_query(query) {
Ok(doc) => doc,
Err(err)
if err.to_string().contains("number too large to fit in target type")
| err.to_string().contains("number too small to fit in target type") =>
{
return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned()));
}
err @ Err(_) => err?,
};
Self::convert(gql_doc, operation_name)
}
pub fn | (gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> {
let mut operations: Vec<Operation> = match operation {
Some(ref op) => gql_doc
.definitions
.into_iter()
.find(|def| Self::matches_operation(def, op))
.ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query.")))
.and_then(Self::convert_definition),
None => gql_doc
.definitions
.into_iter()
.map(Self::convert_definition)
.collect::<crate::Result<Vec<Vec<Operation>>>>()
.map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()),
}?;
let operation = operations
.pop()
.ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))?
.dedup_selections();
Ok(operation)
}
fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> {
match def {
Definition::Fragment(f) => Err(HandlerError::unsupported_feature(
"Fragment definition",
format!("Fragment '{}', at position {}.", f.name, f.position),
)),
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature(
"Subscription query",
format!("At position {}.", s.position),
)),
OperationDefinition::SelectionSet(s) => Self::convert_query(s),
OperationDefinition::Query(q) => Self::convert_query(q.selection_set),
OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set),
},
}
}
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect())
}
fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect())
}
fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> {
selection_set
.items
.into_iter()
.map(|item| match item {
GqlSelection::Field(f) => {
let arguments: Vec<(String, ArgumentValue)> = f
.arguments
.into_iter()
.map(|(k, v)| Ok((k, Self::convert_value(v)?)))
.collect::<crate::Result<Vec<_>>>()?;
let nested_selections = Self::convert_selection_set(f.selection_set)?;
Ok(Selection::new(f.name, f.alias, arguments, nested_selections))
}
GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature(
"Fragment spread",
format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position),
)),
GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature(
"Inline fragment",
format!("At position {}.", i.position),
)),
})
.collect()
}
/// Checks if the given GraphQL definition matches the operation name that should be executed.
fn matches_operation(def: &Definition<String>, operation: &str) -> bool {
let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some();
match def {
Definition::Fragment(_) => false,
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => check(s.name.as_ref()),
OperationDefinition::SelectionSet(_) => false,
OperationDefinition::Query(q) => check(q.name.as_ref()),
OperationDefinition::Mutation(m) => check(m.name.as_ref()),
},
}
}
fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> {
match value {
Value::Variable(name) => Err(HandlerError::unsupported_feature(
"Variable usage",
format!("Variable '{name}'."),
)),
Value::Int(i) => match i.as_i64() {
Some(i) => Ok(ArgumentValue::int(i)),
None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))),
},
Value::Float(f) => match BigDecimal::from_f64(f) {
Some(dec) => Ok(ArgumentValue::float(dec)),
None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))),
},
Value::String(s) => Ok(ArgumentValue::string(s)),
Value::Boolean(b) => Ok(ArgumentValue::bool(b)),
Value::Null => Ok(ArgumentValue::null()),
Value::Enum(e) => Ok(ArgumentValue::r#enum(e)),
Value::List(values) => {
let values: Vec<ArgumentValue> = values
.into_iter()
.map(Self::convert_value)
.collect::<crate::Result<Vec<ArgumentValue>>>()?;
Ok(ArgumentValue::list(values))
}
Value::Object(map) => {
let values = map
.into_iter()
.map(|(k, v)| Self::convert_value(v).map(|v| (k, v)))
.collect::<crate::Result<ArgumentValueObject>>()?;
Ok(ArgumentValue::object(values))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn converts_single_query() {
let query = r#"
query findTheModelOperation {
findOneModel(where: {a_number: {gte: 1}}) {
id,
large_number,
other {
name
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "findOneModel");
assert!(matches!(operation, Operation::Read(_)));
let read = operation.into_read().unwrap();
let where_args = ArgumentValue::object([(
"a_number".to_string(),
ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]),
)]);
assert_eq!(read.arguments(), [("where".to_string(), where_args)]);
let selections = Vec::from([
Selection::new("id", None, [], Vec::new()),
Selection::new("large_number", None, [], Vec::new()),
Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])),
]);
assert_eq!(read.nested_selections(), selections);
}
#[test]
fn converts_single_mutation() {
let query = r#"
mutation {
createOnePost(data: {
id: 1,
categories: {create: [{id: 1}, {id: 2}]}
}) {
id,
categories {
id
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "createOnePost");
assert!(matches!(operation, Operation::Write(_)));
let write = operation.into_write().unwrap();
let data_args = ArgumentValue::object([
("id".to_string(), ArgumentValue::int(1)),
(
"categories".to_string(),
ArgumentValue::object([(
"create".to_string(),
ArgumentValue::list([
ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]),
ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]),
]),
)]),
),
]);
println!("args {:?}", write.arguments());
assert_eq!(write.arguments(), [("data".to_string(), data_args)]);
}
}
| convert | identifier_name |
protocol_adapter.rs | use crate::HandlerError;
use bigdecimal::{BigDecimal, FromPrimitive};
use graphql_parser::query::{
Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value,
};
use query_core::query_document::*;
/// Protocol adapter for GraphQL -> Query Document.
///
/// GraphQL is mapped as following:
/// - Every field of a `query {... }` or single selection block `{... }` is mapped to an `Operation::Read`.
/// - Every field of a single `mutation {... }` is mapped to an `Operation::Write`.
/// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored.
/// - Fields on the queries are mapped to `Field`s, including arguments.
/// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s.
///
/// Currently unsupported features:
/// - Fragments in any form.
/// - Variables.
/// - Subscription queries.
/// - Query names are ignored
pub struct GraphQLProtocolAdapter;
impl GraphQLProtocolAdapter {
pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> {
let gql_doc = match graphql_parser::parse_query(query) {
Ok(doc) => doc,
Err(err)
if err.to_string().contains("number too large to fit in target type")
| err.to_string().contains("number too small to fit in target type") =>
{
return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned()));
}
err @ Err(_) => err?,
};
Self::convert(gql_doc, operation_name)
}
pub fn convert(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> {
let mut operations: Vec<Operation> = match operation {
Some(ref op) => gql_doc
.definitions
.into_iter()
.find(|def| Self::matches_operation(def, op))
.ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query.")))
.and_then(Self::convert_definition),
None => gql_doc
.definitions
.into_iter()
.map(Self::convert_definition)
.collect::<crate::Result<Vec<Vec<Operation>>>>()
.map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()),
}?;
let operation = operations
.pop()
.ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))?
.dedup_selections();
Ok(operation)
}
fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> {
match def {
Definition::Fragment(f) => Err(HandlerError::unsupported_feature(
"Fragment definition",
format!("Fragment '{}', at position {}.", f.name, f.position),
)),
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature(
"Subscription query",
format!("At position {}.", s.position),
)),
OperationDefinition::SelectionSet(s) => Self::convert_query(s),
OperationDefinition::Query(q) => Self::convert_query(q.selection_set),
OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set),
},
}
}
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect())
}
fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect())
}
fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> {
selection_set
.items
.into_iter()
.map(|item| match item {
GqlSelection::Field(f) => {
let arguments: Vec<(String, ArgumentValue)> = f
.arguments
.into_iter()
.map(|(k, v)| Ok((k, Self::convert_value(v)?)))
.collect::<crate::Result<Vec<_>>>()?;
let nested_selections = Self::convert_selection_set(f.selection_set)?;
Ok(Selection::new(f.name, f.alias, arguments, nested_selections))
}
GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature(
"Fragment spread",
format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position),
)),
GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature(
"Inline fragment",
format!("At position {}.", i.position),
)),
})
.collect()
}
/// Checks if the given GraphQL definition matches the operation name that should be executed.
fn matches_operation(def: &Definition<String>, operation: &str) -> bool {
let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some();
match def {
Definition::Fragment(_) => false,
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => check(s.name.as_ref()),
OperationDefinition::SelectionSet(_) => false,
OperationDefinition::Query(q) => check(q.name.as_ref()),
OperationDefinition::Mutation(m) => check(m.name.as_ref()),
},
}
}
fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> {
match value {
Value::Variable(name) => Err(HandlerError::unsupported_feature(
"Variable usage",
format!("Variable '{name}'."),
)),
Value::Int(i) => match i.as_i64() {
Some(i) => Ok(ArgumentValue::int(i)),
None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))),
},
Value::Float(f) => match BigDecimal::from_f64(f) {
Some(dec) => Ok(ArgumentValue::float(dec)),
None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))),
},
Value::String(s) => Ok(ArgumentValue::string(s)),
Value::Boolean(b) => Ok(ArgumentValue::bool(b)),
Value::Null => Ok(ArgumentValue::null()),
Value::Enum(e) => Ok(ArgumentValue::r#enum(e)),
Value::List(values) => {
let values: Vec<ArgumentValue> = values
.into_iter()
.map(Self::convert_value)
.collect::<crate::Result<Vec<ArgumentValue>>>()?;
Ok(ArgumentValue::list(values))
}
Value::Object(map) => {
let values = map
.into_iter()
.map(|(k, v)| Self::convert_value(v).map(|v| (k, v)))
.collect::<crate::Result<ArgumentValueObject>>()?;
Ok(ArgumentValue::object(values))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn converts_single_query() {
let query = r#"
query findTheModelOperation {
findOneModel(where: {a_number: {gte: 1}}) {
id,
large_number,
other {
name
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "findOneModel");
assert!(matches!(operation, Operation::Read(_)));
let read = operation.into_read().unwrap();
let where_args = ArgumentValue::object([(
"a_number".to_string(),
ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]),
)]);
assert_eq!(read.arguments(), [("where".to_string(), where_args)]);
let selections = Vec::from([
Selection::new("id", None, [], Vec::new()),
Selection::new("large_number", None, [], Vec::new()),
Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])),
]);
assert_eq!(read.nested_selections(), selections);
}
#[test]
fn converts_single_mutation() {
let query = r#"
mutation {
createOnePost(data: {
id: 1,
categories: {create: [{id: 1}, {id: 2}]}
}) {
id,
categories {
id
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "createOnePost");
assert!(matches!(operation, Operation::Write(_)));
| (
"categories".to_string(),
ArgumentValue::object([(
"create".to_string(),
ArgumentValue::list([
ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]),
ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]),
]),
)]),
),
]);
println!("args {:?}", write.arguments());
assert_eq!(write.arguments(), [("data".to_string(), data_args)]);
}
} | let write = operation.into_write().unwrap();
let data_args = ArgumentValue::object([
("id".to_string(), ArgumentValue::int(1)), | random_line_split |
parser.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (grammar) BOOLOP STRLEN FILETEST FILEOP INTOP STRINGOP ; (vars) LParen StrlenOp
use std::ffi::{OsStr, OsString};
use std::iter::Peekable;
use super::error::{ParseError, ParseResult};
use uucore::display::Quotable;
/// Represents one of the binary comparison operators for strings, integers, or files
#[derive(Debug, PartialEq, Eq)]
pub enum Operator {
String(OsString),
Int(OsString),
File(OsString),
}
/// Represents one of the unary test operators for strings or files
#[derive(Debug, PartialEq, Eq)]
pub enum UnaryOperator {
StrlenOp(OsString),
FiletestOp(OsString),
}
/// Represents a parsed token from a test expression
#[derive(Debug, PartialEq, Eq)]
pub enum Symbol {
LParen,
Bang,
BoolOp(OsString),
Literal(OsString),
Op(Operator),
UnaryOp(UnaryOperator),
None,
}
impl Symbol {
/// Create a new Symbol from an OsString.
///
/// Returns Symbol::None in place of None
fn new(token: Option<OsString>) -> Self {
match token {
Some(s) => match s.to_str() {
Some(t) => match t {
"(" => Self::LParen,
"!" => Self::Bang,
"-a" | "-o" => Self::BoolOp(s),
"=" | "==" | "!=" => Self::Op(Operator::String(s)),
"-eq" | "-ge" | "-gt" | "-le" | "-lt" | "-ne" => Self::Op(Operator::Int(s)),
"-ef" | "-nt" | "-ot" => Self::Op(Operator::File(s)),
"-n" | "-z" => Self::UnaryOp(UnaryOperator::StrlenOp(s)),
"-b" | "-c" | "-d" | "-e" | "-f" | "-g" | "-G" | "-h" | "-k" | "-L" | "-N"
| "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => {
Self::UnaryOp(UnaryOperator::FiletestOp(s))
}
_ => Self::Literal(s),
},
None => Self::Literal(s),
},
None => Self::None,
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
Self::Bang => OsString::from("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s,
Self::None => panic!(),
})
}
}
/// Implement Display trait for Symbol to make it easier to print useful errors.
/// We will try to match the format in which the symbol appears in the input.
impl std::fmt::Display for Symbol {
/// Format a Symbol for printing
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match &self {
Self::LParen => OsStr::new("("),
Self::Bang => OsStr::new("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s),
Self::None => OsStr::new("None"),
};
write!(f, "{}", s.quote())
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///
/// Grammar:
///
/// EXPR → TERM | EXPR BOOLOP EXPR
/// TERM → ( EXPR )
/// TERM →! EXPR
/// TERM → UOP str
/// UOP → STRLEN | FILETEST
/// TERM → str OP str
/// TERM → str | 𝜖
/// OP → STRINGOP | INTOP | FILEOP
/// STRINGOP → = | == |!=
/// INTOP → -eq | -ge | -gt | -le | -lt | -ne
/// FILEOP → -ef | -nt | -ot
/// STRLEN → -n | -z
/// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p |
/// -r | -s | -S | -t | -u | -w | -x
/// BOOLOP → -a | -o
///
#[derive(Debug)]
struct Parser {
tokens: Peekable<std::vec::IntoIter<OsString>>,
pub stack: Vec<Symbol>,
}
impl Parser {
/// Construct a new Parser from a `Vec<OsString>` of tokens.
fn new(tokens: Vec<OsString>) -> Self {
Self {
tokens: tokens.into_iter().peekable(),
stack: vec![],
}
}
/// Fetch the next token from the input stream as a Symbol.
fn next_token(&mut self) -> Symbol {
Symbol::new(self.tokens.next())
}
/// Consume the next token & verify that it matches the provided value.
fn expect(&mut self, value: &str) -> ParseResult<()> {
match self.next_token() {
Symbol::Literal(s) if s == value => Ok(()),
_ => Err(ParseError::Expected(value.quote().to_string())),
}
}
/// Peek at the next token from the input stream, returning it as a Symbol.
/// The stream is unchanged and will return the same Symbol on subsequent
/// calls to `next()` or `peek()`.
fn peek(&mut self) -> Symbol {
Symbol::new(self.tokens.peek().map(|s| s.to_os_string()))
}
/// Test if the next token in the stream is a BOOLOP (-a or -o), without
/// removing the token from the stream.
fn peek_is_boolop(&mut self) -> bool {
matches!(self.peek(), Symbol::BoolOp(_))
}
/// Parse an expression.
///
/// EXPR → TERM | EXPR BOOLOP EXPR
fn expr(&mut self) -> ParseResult<()> {
if!self.peek_is_boolop() {
self.term()?;
}
self.maybe_boolop()?;
Ok(())
}
/// Parse a term token and possible subsequent symbols: "(", "!", UOP,
/// literal, or None.
fn term(&mut self) -> ParseResult<()> {
let symbol = self.next_token();
match symbol {
Symbol::LParen => self.lparen()?,
Symbol::Bang => self.bang()?,
Symbol::UnaryOp(_) => self.uop(symbol),
Symbol::None => self.stack.push(symbol),
literal => self.literal(literal)?,
}
Ok(())
}
/// Parse a (possibly) parenthesized expression.
///
/// test has no reserved keywords, so "(" will be interpreted as a literal
/// in certain cases:
///
/// * when found at the end of the token stream
/// * when followed by a binary operator that is not _itself_ interpreted | /// as a literal
///
fn lparen(&mut self) -> ParseResult<()> {
// Look ahead up to 3 tokens to determine if the lparen is being used
// as a grouping operator or should be treated as a literal string
let peek3: Vec<Symbol> = self
.tokens
.clone()
.take(3)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek3.as_slice() {
// case 1: lparen is a literal when followed by nothing
[] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 2: error if end of stream is `( <any_token>`
[symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))),
// case 3: `( uop <any_token> )` → parenthesized unary operation;
// this case ensures we don’t get confused by `( -f ) )`
// or `( -f ( )`, for example
[Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => {
let symbol = self.next_token();
self.uop(symbol);
self.expect(")")?;
Ok(())
}
// case 4: binary comparison of literal lparen, e.g. `(!= )`
[Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _]
if s == ")" =>
{
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 5: after handling the prior cases, any single token inside
// parentheses is a literal, e.g. `( -f )`
[_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 6: two binary ops in a row, treat the first op as a literal
[Symbol::Op(_), Symbol::Op(_), _] => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 7: if earlier cases didn’t match, `( op <any_token>…`
// indicates binary comparison of literal lparen with
// anything _except_ ")" (case 4)
[Symbol::Op(_), _] | [Symbol::Op(_), _, _] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// Otherwise, lparen indicates the start of a parenthesized
// expression
_ => {
self.expr()?;
self.expect(")")?;
Ok(())
}
}
}
/// Parse a (possibly) negated expression.
///
/// Example cases:
///
/// * `! =`: negate the result of the implicit string length test of `=`
/// * `! = foo`: compare the literal strings `!` and `foo`
/// * `! = = str`: negate comparison of literal `=` and `str`
/// * `!`: bang followed by nothing is literal
/// * `! EXPR`: negate the result of the expression
///
/// Combined Boolean & negation:
///
/// * `! ( EXPR ) [BOOLOP EXPR]`: negate the parenthesized expression only
/// * `! UOP str BOOLOP EXPR`: negate the unary subexpression
/// * `! str BOOLOP str`: negate the entire Boolean expression
/// * `! str BOOLOP EXPR BOOLOP EXPR`: negate the value of the first `str` term
///
fn bang(&mut self) -> ParseResult<()> {
match self.peek() {
Symbol::Op(_) | Symbol::BoolOp(_) => {
// we need to peek ahead one more token to disambiguate the first
// three cases listed above
let peek2 = Symbol::new(self.tokens.clone().nth(1));
match peek2 {
// case 1: `! <OP as literal>`
// case 3: `! = OP str`
Symbol::Op(_) | Symbol::None => {
// op is literal
let op = self.next_token().into_literal();
self.literal(op)?;
self.stack.push(Symbol::Bang);
}
// case 2: `<! as literal> OP str [BOOLOP EXPR]`.
_ => {
// bang is literal; parsing continues with op
self.literal(Symbol::Bang.into_literal())?;
self.maybe_boolop()?;
}
}
}
// bang followed by nothing is literal
Symbol::None => self.stack.push(Symbol::Bang.into_literal()),
_ => {
// peek ahead up to 4 tokens to determine if we need to negate
// the entire expression or just the first term
let peek4: Vec<Symbol> = self
.tokens
.clone()
.take(4)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek4.as_slice() {
// we peeked ahead 4 but there were only 3 tokens left
[Symbol::Literal(_), Symbol::BoolOp(_), Symbol::Literal(_)] => {
self.expr()?;
self.stack.push(Symbol::Bang);
}
_ => {
self.term()?;
self.stack.push(Symbol::Bang);
}
}
}
}
Ok(())
}
/// Peek at the next token and parse it as a BOOLOP or string literal,
/// as appropriate.
fn maybe_boolop(&mut self) -> ParseResult<()> {
if self.peek_is_boolop() {
let symbol = self.next_token();
// BoolOp by itself interpreted as Literal
if let Symbol::None = self.peek() {
self.literal(symbol.into_literal())?;
} else {
self.boolop(symbol)?;
self.maybe_boolop()?;
}
}
Ok(())
}
/// Parse a Boolean expression.
///
/// Logical and (-a) has higher precedence than or (-o), so in an
/// expression like `foo -o '' -a ''`, the and subexpression is evaluated
/// first.
fn boolop(&mut self, op: Symbol) -> ParseResult<()> {
if op == Symbol::BoolOp(OsString::from("-a")) {
self.term()?;
} else {
self.expr()?;
}
self.stack.push(op);
Ok(())
}
/// Parse a (possible) unary argument test (string length or file
/// attribute check).
///
/// If a UOP is followed by nothing it is interpreted as a literal string.
fn uop(&mut self, op: Symbol) {
match self.next_token() {
Symbol::None => self.stack.push(op.into_literal()),
symbol => {
self.stack.push(symbol.into_literal());
self.stack.push(op);
}
}
}
/// Parse a string literal, optionally followed by a comparison operator
/// and a second string literal.
fn literal(&mut self, token: Symbol) -> ParseResult<()> {
self.stack.push(token.into_literal());
// EXPR → str OP str
if let Symbol::Op(_) = self.peek() {
let op = self.next_token();
match self.next_token() {
Symbol::None => {
return Err(ParseError::MissingArgument(format!("{op}")));
}
token => self.stack.push(token.into_literal()),
}
self.stack.push(op);
}
Ok(())
}
/// Parser entry point: parse the token stream `self.tokens`, storing the
/// resulting `Symbol` stack in `self.stack`.
fn parse(&mut self) -> ParseResult<()> {
self.expr()?;
match self.tokens.next() {
Some(token) => Err(ParseError::ExtraArgument(token.quote().to_string())),
None => Ok(()),
}
}
}
/// Parse the token stream `args`, returning a `Symbol` stack representing the
/// operations to perform in postfix order.
pub fn parse(args: Vec<OsString>) -> ParseResult<Vec<Symbol>> {
let mut p = Parser::new(args);
p.parse()?;
Ok(p.stack)
} | random_line_split |
|
parser.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (grammar) BOOLOP STRLEN FILETEST FILEOP INTOP STRINGOP ; (vars) LParen StrlenOp
use std::ffi::{OsStr, OsString};
use std::iter::Peekable;
use super::error::{ParseError, ParseResult};
use uucore::display::Quotable;
/// Represents one of the binary comparison operators for strings, integers, or files
#[derive(Debug, PartialEq, Eq)]
pub enum Operator {
String(OsString),
Int(OsString),
File(OsString),
}
/// Represents one of the unary test operators for strings or files
#[derive(Debug, PartialEq, Eq)]
pub enum UnaryOperator {
StrlenOp(OsString),
FiletestOp(OsString),
}
/// Represents a parsed token from a test expression
#[derive(Debug, PartialEq, Eq)]
pub enum Symbol {
LParen,
Bang,
BoolOp(OsString),
Literal(OsString),
Op(Operator),
UnaryOp(UnaryOperator),
None,
}
impl Symbol {
/// Create a new Symbol from an OsString.
///
/// Returns Symbol::None in place of None
fn new(token: Option<OsString>) -> Self {
match token {
Some(s) => match s.to_str() {
Some(t) => match t {
"(" => Self::LParen,
"!" => Self::Bang,
"-a" | "-o" => Self::BoolOp(s),
"=" | "==" | "!=" => Self::Op(Operator::String(s)),
"-eq" | "-ge" | "-gt" | "-le" | "-lt" | "-ne" => Self::Op(Operator::Int(s)),
"-ef" | "-nt" | "-ot" => Self::Op(Operator::File(s)),
"-n" | "-z" => Self::UnaryOp(UnaryOperator::StrlenOp(s)),
"-b" | "-c" | "-d" | "-e" | "-f" | "-g" | "-G" | "-h" | "-k" | "-L" | "-N"
| "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => {
Self::UnaryOp(UnaryOperator::FiletestOp(s))
}
_ => Self::Literal(s),
},
None => Self::Literal(s),
},
None => Self::None,
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
Self::Bang => OsString::from("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s,
Self::None => panic!(),
})
}
}
/// Implement Display trait for Symbol to make it easier to print useful errors.
/// We will try to match the format in which the symbol appears in the input.
impl std::fmt::Display for Symbol {
/// Format a Symbol for printing
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match &self {
Self::LParen => OsStr::new("("),
Self::Bang => OsStr::new("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s),
Self::None => OsStr::new("None"),
};
write!(f, "{}", s.quote())
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///
/// Grammar:
///
/// EXPR → TERM | EXPR BOOLOP EXPR
/// TERM → ( EXPR )
/// TERM →! EXPR
/// TERM → UOP str
/// UOP → STRLEN | FILETEST
/// TERM → str OP str
/// TERM → str | 𝜖
/// OP → STRINGOP | INTOP | FILEOP
/// STRINGOP → = | == |!=
/// INTOP → -eq | -ge | -gt | -le | -lt | -ne
/// FILEOP → -ef | -nt | -ot
/// STRLEN → -n | -z
/// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p |
/// -r | -s | -S | -t | -u | -w | -x
/// BOOLOP → -a | -o
///
#[derive(Debug)]
struct Parser {
tokens: Peekable<std::vec::IntoIter<OsString>>,
pub stack: Vec<Symbol>,
}
impl Parser {
/// Construct a new Parser from a `Vec<OsString>` of tokens.
fn new(tokens: Vec<OsString>) -> Self {
Self {
tokens: tokens.into_iter().peekable(),
stack: vec![],
}
}
/// Fetch the next token from the input stream as a Symbol.
fn next_token(&mut self) -> Symbol {
Symbol::new(self.tokens.next())
}
/// Consume the next token & verify that it matches the provided value.
fn expect(&mut self, value: &str) -> ParseResult<()> {
match self.next_token() {
Symbol::Literal(s) if s == value => Ok(()),
_ => Err(ParseError::Expected(value.quote().to_string())),
}
}
/// Peek at the next token from the input stream, returning it as a Symbol.
/// The stream is unchanged and will return the same Symbol on subsequent
/// calls to `next()` or `peek()`.
fn peek(&mut self) -> Symbol {
Symbol::new(self.tokens.peek().map(|s| s.to_os_string()))
}
/// Test if the next token in the stream is a BOOLOP (-a or -o), without
/// removing the token from the stream.
fn peek_is_boolop(&mut self) -> bool {
matches!(self.peek(), Symbol::BoolOp(_))
}
/// Parse an expression.
///
/// EXPR → TERM | EXPR BOOLOP EXPR
fn expr(&mut self) -> ParseResult<()> {
if!self.peek_is_boolop() {
self.term()?;
}
self.maybe_boolop()?;
Ok(())
}
/// Parse a term token and possible subsequent symbols: "(", "!", UOP,
/// literal, or None.
fn term(&mut self) -> ParseResult<()> {
let symbol = self.next_t | thesized expression.
///
/// test has no reserved keywords, so "(" will be interpreted as a literal
/// in certain cases:
///
/// * when found at the end of the token stream
/// * when followed by a binary operator that is not _itself_ interpreted
/// as a literal
///
fn lparen(&mut self) -> ParseResult<()> {
// Look ahead up to 3 tokens to determine if the lparen is being used
// as a grouping operator or should be treated as a literal string
let peek3: Vec<Symbol> = self
.tokens
.clone()
.take(3)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek3.as_slice() {
// case 1: lparen is a literal when followed by nothing
[] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 2: error if end of stream is `( <any_token>`
[symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))),
// case 3: `( uop <any_token> )` → parenthesized unary operation;
// this case ensures we don’t get confused by `( -f ) )`
// or `( -f ( )`, for example
[Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => {
let symbol = self.next_token();
self.uop(symbol);
self.expect(")")?;
Ok(())
}
// case 4: binary comparison of literal lparen, e.g. `(!= )`
[Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _]
if s == ")" =>
{
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 5: after handling the prior cases, any single token inside
// parentheses is a literal, e.g. `( -f )`
[_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 6: two binary ops in a row, treat the first op as a literal
[Symbol::Op(_), Symbol::Op(_), _] => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 7: if earlier cases didn’t match, `( op <any_token>…`
// indicates binary comparison of literal lparen with
// anything _except_ ")" (case 4)
[Symbol::Op(_), _] | [Symbol::Op(_), _, _] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// Otherwise, lparen indicates the start of a parenthesized
// expression
_ => {
self.expr()?;
self.expect(")")?;
Ok(())
}
}
}
/// Parse a (possibly) negated expression.
///
/// Example cases:
///
/// * `! =`: negate the result of the implicit string length test of `=`
/// * `! = foo`: compare the literal strings `!` and `foo`
/// * `! = = str`: negate comparison of literal `=` and `str`
/// * `!`: bang followed by nothing is literal
/// * `! EXPR`: negate the result of the expression
///
/// Combined Boolean & negation:
///
/// * `! ( EXPR ) [BOOLOP EXPR]`: negate the parenthesized expression only
/// * `! UOP str BOOLOP EXPR`: negate the unary subexpression
/// * `! str BOOLOP str`: negate the entire Boolean expression
/// * `! str BOOLOP EXPR BOOLOP EXPR`: negate the value of the first `str` term
///
fn bang(&mut self) -> ParseResult<()> {
match self.peek() {
Symbol::Op(_) | Symbol::BoolOp(_) => {
// we need to peek ahead one more token to disambiguate the first
// three cases listed above
let peek2 = Symbol::new(self.tokens.clone().nth(1));
match peek2 {
// case 1: `! <OP as literal>`
// case 3: `! = OP str`
Symbol::Op(_) | Symbol::None => {
// op is literal
let op = self.next_token().into_literal();
self.literal(op)?;
self.stack.push(Symbol::Bang);
}
// case 2: `<! as literal> OP str [BOOLOP EXPR]`.
_ => {
// bang is literal; parsing continues with op
self.literal(Symbol::Bang.into_literal())?;
self.maybe_boolop()?;
}
}
}
// bang followed by nothing is literal
Symbol::None => self.stack.push(Symbol::Bang.into_literal()),
_ => {
// peek ahead up to 4 tokens to determine if we need to negate
// the entire expression or just the first term
let peek4: Vec<Symbol> = self
.tokens
.clone()
.take(4)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek4.as_slice() {
// we peeked ahead 4 but there were only 3 tokens left
[Symbol::Literal(_), Symbol::BoolOp(_), Symbol::Literal(_)] => {
self.expr()?;
self.stack.push(Symbol::Bang);
}
_ => {
self.term()?;
self.stack.push(Symbol::Bang);
}
}
}
}
Ok(())
}
/// Peek at the next token and parse it as a BOOLOP or string literal,
/// as appropriate.
fn maybe_boolop(&mut self) -> ParseResult<()> {
if self.peek_is_boolop() {
let symbol = self.next_token();
// BoolOp by itself interpreted as Literal
if let Symbol::None = self.peek() {
self.literal(symbol.into_literal())?;
} else {
self.boolop(symbol)?;
self.maybe_boolop()?;
}
}
Ok(())
}
/// Parse a Boolean expression.
///
/// Logical and (-a) has higher precedence than or (-o), so in an
/// expression like `foo -o '' -a ''`, the and subexpression is evaluated
/// first.
fn boolop(&mut self, op: Symbol) -> ParseResult<()> {
if op == Symbol::BoolOp(OsString::from("-a")) {
self.term()?;
} else {
self.expr()?;
}
self.stack.push(op);
Ok(())
}
/// Parse a (possible) unary argument test (string length or file
/// attribute check).
///
/// If a UOP is followed by nothing it is interpreted as a literal string.
fn uop(&mut self, op: Symbol) {
match self.next_token() {
Symbol::None => self.stack.push(op.into_literal()),
symbol => {
self.stack.push(symbol.into_literal());
self.stack.push(op);
}
}
}
/// Parse a string literal, optionally followed by a comparison operator
/// and a second string literal.
fn literal(&mut self, token: Symbol) -> ParseResult<()> {
self.stack.push(token.into_literal());
// EXPR → str OP str
if let Symbol::Op(_) = self.peek() {
let op = self.next_token();
match self.next_token() {
Symbol::None => {
return Err(ParseError::MissingArgument(format!("{op}")));
}
token => self.stack.push(token.into_literal()),
}
self.stack.push(op);
}
Ok(())
}
/// Parser entry point: parse the token stream `self.tokens`, storing the
/// resulting `Symbol` stack in `self.stack`.
fn parse(&mut self) -> ParseResult<()> {
self.expr()?;
match self.tokens.next() {
Some(token) => Err(ParseError::ExtraArgument(token.quote().to_string())),
None => Ok(()),
}
}
}
/// Parse the token stream `args`, returning a `Symbol` stack representing the
/// operations to perform in postfix order.
pub fn parse(args: Vec<OsString>) -> ParseResult<Vec<Symbol>> {
let mut p = Parser::new(args);
p.parse()?;
Ok(p.stack)
}
| oken();
match symbol {
Symbol::LParen => self.lparen()?,
Symbol::Bang => self.bang()?,
Symbol::UnaryOp(_) => self.uop(symbol),
Symbol::None => self.stack.push(symbol),
literal => self.literal(literal)?,
}
Ok(())
}
/// Parse a (possibly) paren | identifier_body |
parser.rs | // This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (grammar) BOOLOP STRLEN FILETEST FILEOP INTOP STRINGOP ; (vars) LParen StrlenOp
use std::ffi::{OsStr, OsString};
use std::iter::Peekable;
use super::error::{ParseError, ParseResult};
use uucore::display::Quotable;
/// Represents one of the binary comparison operators for strings, integers, or files
#[derive(Debug, PartialEq, Eq)]
pub enum Operator {
String(OsString),
Int(OsString),
File(OsString),
}
/// Represents one of the unary test operators for strings or files
#[derive(Debug, PartialEq, Eq)]
pub enum UnaryOperator {
StrlenOp(OsString),
FiletestOp(OsString),
}
/// Represents a parsed token from a test expression
#[derive(Debug, PartialEq, Eq)]
pub enum Symbol {
LParen,
Bang,
BoolOp(OsString),
Literal(OsString),
Op(Operator),
UnaryOp(UnaryOperator),
None,
}
impl Symbol {
/// Create a new Symbol from an OsString.
///
/// Returns Symbol::None in place of None
fn new(token: Option<OsString>) -> Self {
match token {
Some(s) => match s.to_str() {
Some(t) => match t {
"(" => Self::LParen,
"!" => Self::Bang,
"-a" | "-o" => Self::BoolOp(s),
"=" | "==" | "!=" => Self::Op(Operator::String(s)),
"-eq" | "-ge" | "-gt" | "-le" | "-lt" | "-ne" => Self::Op(Operator::Int(s)),
"-ef" | "-nt" | "-ot" => Self::Op(Operator::File(s)),
"-n" | "-z" => Self::UnaryOp(UnaryOperator::StrlenOp(s)),
"-b" | "-c" | "-d" | "-e" | "-f" | "-g" | "-G" | "-h" | "-k" | "-L" | "-N"
| "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => {
Self::UnaryOp(UnaryOperator::FiletestOp(s))
}
_ => Self::Literal(s),
},
None => Self::Literal(s),
},
None => Self::None,
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
Self::Bang => OsString::from("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s,
Self::None => panic!(),
})
}
}
/// Implement Display trait for Symbol to make it easier to print useful errors.
/// We will try to match the format in which the symbol appears in the input.
impl std::fmt::Display for Symbol {
/// Format a Symbol for printing
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match &self {
Self::LParen => OsStr::new("("),
Self::Bang => OsStr::new("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s),
Self::None => OsStr::new("None"),
};
write!(f, "{}", s.quote())
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///
/// Grammar:
///
/// EXPR → TERM | EXPR BOOLOP EXPR
/// TERM → ( EXPR )
/// TERM →! EXPR
/// TERM → UOP str
/// UOP → STRLEN | FILETEST
/// TERM → str OP str
/// TERM → str | 𝜖
/// OP → STRINGOP | INTOP | FILEOP
/// STRINGOP → = | == |!=
/// INTOP → -eq | -ge | -gt | -le | -lt | -ne
/// FILEOP → -ef | -nt | -ot
/// STRLEN → -n | -z
/// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p |
/// -r | -s | -S | -t | -u | -w | -x
/// BOOLOP → -a | -o
///
#[derive(Debug)]
struct Parser {
tokens: Peekable<std::vec::IntoIter<OsString>>,
pub stack: Vec<Symbol>,
}
impl Parser {
/// Construct a new Parser from a `Vec<OsString>` of tokens.
fn new(tokens: Vec<OsString>) -> Self {
Self {
tokens: tokens.into_iter().peekable(),
stack: vec![],
}
}
/// Fetch the next token from the input stream as a Symbol.
fn next_token(&mut self) -> Symbol {
Symbol::new(self.tokens.next())
}
/// Consume the next token & verify that it matches the provided value.
fn expect(&mut self, value: &str) -> ParseResult<()> {
match self.next_token() {
Symbol::Literal(s) if s == value => Ok(()),
_ => Err(ParseError::Expected(value.quote().to_string())),
}
}
/// Peek at the next token from the input stream, returning it as a Symbol.
/// The stream is unchanged and will return the same Symbol on subsequent
/// calls to `next()` or `peek()`.
fn peek(&mut self) -> Symbol {
Symbol::new(self.tokens.peek().map(|s| s.to_os_string()))
}
/// Test if the next token in the stream is a BOOLOP (-a or -o), without
/// removing the token from the stream.
fn peek_is_boolop(&mut self) -> bool {
matches!(self.peek(), Symbol::BoolOp(_))
}
/// Parse an expression.
///
/// EXPR → TERM | EXPR BOOLOP EXPR
fn expr(&mut self) -> ParseResult<()> {
if!self.peek_is_boolop() {
self.term()?;
}
self.maybe_boolop()?;
Ok(())
}
/// Parse a term token and possible subsequent symbols: "(", "!", UOP,
/// literal, or None.
fn term(&mut self) -> ParseResult<()> | let symbol = self.next_token();
match symbol {
Symbol::LParen => self.lparen()?,
Symbol::Bang => self.bang()?,
Symbol::UnaryOp(_) => self.uop(symbol),
Symbol::None => self.stack.push(symbol),
literal => self.literal(literal)?,
}
Ok(())
}
/// Parse a (possibly) parenthesized expression.
///
/// test has no reserved keywords, so "(" will be interpreted as a literal
/// in certain cases:
///
/// * when found at the end of the token stream
/// * when followed by a binary operator that is not _itself_ interpreted
/// as a literal
///
fn lparen(&mut self) -> ParseResult<()> {
// Look ahead up to 3 tokens to determine if the lparen is being used
// as a grouping operator or should be treated as a literal string
let peek3: Vec<Symbol> = self
.tokens
.clone()
.take(3)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek3.as_slice() {
// case 1: lparen is a literal when followed by nothing
[] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 2: error if end of stream is `( <any_token>`
[symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))),
// case 3: `( uop <any_token> )` → parenthesized unary operation;
// this case ensures we don’t get confused by `( -f ) )`
// or `( -f ( )`, for example
[Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => {
let symbol = self.next_token();
self.uop(symbol);
self.expect(")")?;
Ok(())
}
// case 4: binary comparison of literal lparen, e.g. `(!= )`
[Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _]
if s == ")" =>
{
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 5: after handling the prior cases, any single token inside
// parentheses is a literal, e.g. `( -f )`
[_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 6: two binary ops in a row, treat the first op as a literal
[Symbol::Op(_), Symbol::Op(_), _] => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 7: if earlier cases didn’t match, `( op <any_token>…`
// indicates binary comparison of literal lparen with
// anything _except_ ")" (case 4)
[Symbol::Op(_), _] | [Symbol::Op(_), _, _] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// Otherwise, lparen indicates the start of a parenthesized
// expression
_ => {
self.expr()?;
self.expect(")")?;
Ok(())
}
}
}
/// Parse a (possibly) negated expression.
///
/// Example cases:
///
/// * `! =`: negate the result of the implicit string length test of `=`
/// * `! = foo`: compare the literal strings `!` and `foo`
/// * `! = = str`: negate comparison of literal `=` and `str`
/// * `!`: bang followed by nothing is literal
/// * `! EXPR`: negate the result of the expression
///
/// Combined Boolean & negation:
///
/// * `! ( EXPR ) [BOOLOP EXPR]`: negate the parenthesized expression only
/// * `! UOP str BOOLOP EXPR`: negate the unary subexpression
/// * `! str BOOLOP str`: negate the entire Boolean expression
/// * `! str BOOLOP EXPR BOOLOP EXPR`: negate the value of the first `str` term
///
fn bang(&mut self) -> ParseResult<()> {
match self.peek() {
Symbol::Op(_) | Symbol::BoolOp(_) => {
// we need to peek ahead one more token to disambiguate the first
// three cases listed above
let peek2 = Symbol::new(self.tokens.clone().nth(1));
match peek2 {
// case 1: `! <OP as literal>`
// case 3: `! = OP str`
Symbol::Op(_) | Symbol::None => {
// op is literal
let op = self.next_token().into_literal();
self.literal(op)?;
self.stack.push(Symbol::Bang);
}
// case 2: `<! as literal> OP str [BOOLOP EXPR]`.
_ => {
// bang is literal; parsing continues with op
self.literal(Symbol::Bang.into_literal())?;
self.maybe_boolop()?;
}
}
}
// bang followed by nothing is literal
Symbol::None => self.stack.push(Symbol::Bang.into_literal()),
_ => {
// peek ahead up to 4 tokens to determine if we need to negate
// the entire expression or just the first term
let peek4: Vec<Symbol> = self
.tokens
.clone()
.take(4)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek4.as_slice() {
// we peeked ahead 4 but there were only 3 tokens left
[Symbol::Literal(_), Symbol::BoolOp(_), Symbol::Literal(_)] => {
self.expr()?;
self.stack.push(Symbol::Bang);
}
_ => {
self.term()?;
self.stack.push(Symbol::Bang);
}
}
}
}
Ok(())
}
/// Peek at the next token and parse it as a BOOLOP or string literal,
/// as appropriate.
fn maybe_boolop(&mut self) -> ParseResult<()> {
if self.peek_is_boolop() {
let symbol = self.next_token();
// BoolOp by itself interpreted as Literal
if let Symbol::None = self.peek() {
self.literal(symbol.into_literal())?;
} else {
self.boolop(symbol)?;
self.maybe_boolop()?;
}
}
Ok(())
}
/// Parse a Boolean expression.
///
/// Logical and (-a) has higher precedence than or (-o), so in an
/// expression like `foo -o '' -a ''`, the and subexpression is evaluated
/// first.
fn boolop(&mut self, op: Symbol) -> ParseResult<()> {
if op == Symbol::BoolOp(OsString::from("-a")) {
self.term()?;
} else {
self.expr()?;
}
self.stack.push(op);
Ok(())
}
/// Parse a (possible) unary argument test (string length or file
/// attribute check).
///
/// If a UOP is followed by nothing it is interpreted as a literal string.
fn uop(&mut self, op: Symbol) {
match self.next_token() {
Symbol::None => self.stack.push(op.into_literal()),
symbol => {
self.stack.push(symbol.into_literal());
self.stack.push(op);
}
}
}
/// Parse a string literal, optionally followed by a comparison operator
/// and a second string literal.
fn literal(&mut self, token: Symbol) -> ParseResult<()> {
self.stack.push(token.into_literal());
// EXPR → str OP str
if let Symbol::Op(_) = self.peek() {
let op = self.next_token();
match self.next_token() {
Symbol::None => {
return Err(ParseError::MissingArgument(format!("{op}")));
}
token => self.stack.push(token.into_literal()),
}
self.stack.push(op);
}
Ok(())
}
/// Parser entry point: parse the token stream `self.tokens`, storing the
/// resulting `Symbol` stack in `self.stack`.
fn parse(&mut self) -> ParseResult<()> {
self.expr()?;
match self.tokens.next() {
Some(token) => Err(ParseError::ExtraArgument(token.quote().to_string())),
None => Ok(()),
}
}
}
/// Parse the token stream `args`, returning a `Symbol` stack representing the
/// operations to perform in postfix order.
pub fn parse(args: Vec<OsString>) -> ParseResult<Vec<Symbol>> {
let mut p = Parser::new(args);
p.parse()?;
Ok(p.stack)
}
| {
| identifier_name |
cpu_time.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
// Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs
// TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed.
use std::{
io, mem,
time::{Duration, Instant},
};
use derive_more::{Add, Sub};
#[derive(Add, Sub)]
pub struct LinuxStyleCpuTime {
pub user: u64,
pub nice: u64,
pub system: u64,
pub idle: u64,
pub iowait: u64,
pub irq: u64,
pub softirq: u64,
pub steal: u64,
pub guest: u64,
pub guest_nice: u64,
}
impl LinuxStyleCpuTime {
pub fn total(&self) -> u64 {
// Note: guest(_nice) is not counted, since it is already in user.
// See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time
self.user
+ self.system
+ self.idle
+ self.nice
+ self.iowait
+ self.irq
+ self.softirq
+ self.steal
}
pub fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()?!= "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret!= KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn cpu_time() -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel_time, user_time))
}
#[inline]
pub fn cpu_time() -> io::Result<Duration> | }
};
let kt = filetime_to_ns100(kernel_time);
let ut = filetime_to_ns100(user_time);
// convert ns
//
// Note: make it ns unit may overflow in some cases.
// For example, a machine with 128 cores runs for one year.
let cpu = (kt + ut) * 100;
// make it un-normalized
let cpu = cpu * processor_numbers()? as u64;
Ok(Duration::from_nanos(cpu))
}
}
#[cfg(test)]
mod tests {
use super::*;
// this test should be executed alone.
#[test]
fn test_process_usage() {
let mut stat = ProcessStat::cur_proc_stat().unwrap();
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage < 0.01);
let num = 1;
for _ in 0..num * 10 {
std::thread::spawn(move || {
loop {
let _ = (0..10_000_000).into_iter().sum::<u128>();
}
});
}
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage > 0.9_f64)
}
}
| {
let (kernel_time, user_time) = unsafe {
let process = GetCurrentProcess();
let mut create_time = mem::zeroed();
let mut exit_time = mem::zeroed();
let mut kernel_time = mem::zeroed();
let mut user_time = mem::zeroed();
let ret = GetProcessTimes(
process,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
);
if ret != 0 {
(kernel_time, user_time)
} else {
return Err(io::Error::last_os_error()); | identifier_body |
cpu_time.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
// Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs
// TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed.
use std::{
io, mem,
time::{Duration, Instant},
};
use derive_more::{Add, Sub};
#[derive(Add, Sub)]
pub struct LinuxStyleCpuTime {
pub user: u64,
pub nice: u64,
pub system: u64,
pub idle: u64,
pub iowait: u64,
pub irq: u64,
pub softirq: u64,
pub steal: u64,
pub guest: u64,
pub guest_nice: u64,
}
impl LinuxStyleCpuTime {
pub fn total(&self) -> u64 {
// Note: guest(_nice) is not counted, since it is already in user.
// See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time
self.user
+ self.system
+ self.idle
+ self.nice
+ self.iowait
+ self.irq
+ self.softirq
+ self.steal
}
pub fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()?!= "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret!= KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn | () -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel_time, user_time))
}
#[inline]
pub fn cpu_time() -> io::Result<Duration> {
let (kernel_time, user_time) = unsafe {
let process = GetCurrentProcess();
let mut create_time = mem::zeroed();
let mut exit_time = mem::zeroed();
let mut kernel_time = mem::zeroed();
let mut user_time = mem::zeroed();
let ret = GetProcessTimes(
process,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
);
if ret!= 0 {
(kernel_time, user_time)
} else {
return Err(io::Error::last_os_error());
}
};
let kt = filetime_to_ns100(kernel_time);
let ut = filetime_to_ns100(user_time);
// convert ns
//
// Note: make it ns unit may overflow in some cases.
// For example, a machine with 128 cores runs for one year.
let cpu = (kt + ut) * 100;
// make it un-normalized
let cpu = cpu * processor_numbers()? as u64;
Ok(Duration::from_nanos(cpu))
}
}
#[cfg(test)]
mod tests {
use super::*;
// this test should be executed alone.
#[test]
fn test_process_usage() {
let mut stat = ProcessStat::cur_proc_stat().unwrap();
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage < 0.01);
let num = 1;
for _ in 0..num * 10 {
std::thread::spawn(move || {
loop {
let _ = (0..10_000_000).into_iter().sum::<u128>();
}
});
}
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage > 0.9_f64)
}
}
| cpu_time | identifier_name |
cpu_time.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
// Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs
// TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed.
use std::{
io, mem,
time::{Duration, Instant},
};
use derive_more::{Add, Sub};
#[derive(Add, Sub)]
pub struct LinuxStyleCpuTime {
pub user: u64,
pub nice: u64,
pub system: u64,
pub idle: u64,
pub iowait: u64,
pub irq: u64,
pub softirq: u64,
pub steal: u64,
pub guest: u64,
pub guest_nice: u64,
}
impl LinuxStyleCpuTime {
pub fn total(&self) -> u64 {
// Note: guest(_nice) is not counted, since it is already in user.
// See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time
self.user
+ self.system
+ self.idle
+ self.nice
+ self.iowait
+ self.irq
+ self.softirq
+ self.steal
}
pub fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()?!= "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret!= KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn cpu_time() -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel_time, user_time))
}
#[inline]
pub fn cpu_time() -> io::Result<Duration> {
let (kernel_time, user_time) = unsafe {
let process = GetCurrentProcess();
let mut create_time = mem::zeroed();
let mut exit_time = mem::zeroed();
let mut kernel_time = mem::zeroed();
let mut user_time = mem::zeroed();
let ret = GetProcessTimes(
process,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
);
if ret!= 0 {
(kernel_time, user_time)
} else {
return Err(io::Error::last_os_error()); | }
};
let kt = filetime_to_ns100(kernel_time);
let ut = filetime_to_ns100(user_time);
// convert ns
//
// Note: make it ns unit may overflow in some cases.
// For example, a machine with 128 cores runs for one year.
let cpu = (kt + ut) * 100;
// make it un-normalized
let cpu = cpu * processor_numbers()? as u64;
Ok(Duration::from_nanos(cpu))
}
}
#[cfg(test)]
mod tests {
use super::*;
// this test should be executed alone.
#[test]
fn test_process_usage() {
let mut stat = ProcessStat::cur_proc_stat().unwrap();
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage < 0.01);
let num = 1;
for _ in 0..num * 10 {
std::thread::spawn(move || {
loop {
let _ = (0..10_000_000).into_iter().sum::<u128>();
}
});
}
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage > 0.9_f64)
}
} | random_line_split |
|
cpu_time.rs | // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
// Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs
// TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed.
use std::{
io, mem,
time::{Duration, Instant},
};
use derive_more::{Add, Sub};
#[derive(Add, Sub)]
pub struct LinuxStyleCpuTime {
pub user: u64,
pub nice: u64,
pub system: u64,
pub idle: u64,
pub iowait: u64,
pub irq: u64,
pub softirq: u64,
pub steal: u64,
pub guest: u64,
pub guest_nice: u64,
}
impl LinuxStyleCpuTime {
pub fn total(&self) -> u64 {
// Note: guest(_nice) is not counted, since it is already in user.
// See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time
self.user
+ self.system
+ self.idle
+ self.nice
+ self.iowait
+ self.irq
+ self.softirq
+ self.steal
}
pub fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 | else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()?!= "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret!= KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn cpu_time() -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel_time, user_time))
}
#[inline]
pub fn cpu_time() -> io::Result<Duration> {
let (kernel_time, user_time) = unsafe {
let process = GetCurrentProcess();
let mut create_time = mem::zeroed();
let mut exit_time = mem::zeroed();
let mut kernel_time = mem::zeroed();
let mut user_time = mem::zeroed();
let ret = GetProcessTimes(
process,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
);
if ret!= 0 {
(kernel_time, user_time)
} else {
return Err(io::Error::last_os_error());
}
};
let kt = filetime_to_ns100(kernel_time);
let ut = filetime_to_ns100(user_time);
// convert ns
//
// Note: make it ns unit may overflow in some cases.
// For example, a machine with 128 cores runs for one year.
let cpu = (kt + ut) * 100;
// make it un-normalized
let cpu = cpu * processor_numbers()? as u64;
Ok(Duration::from_nanos(cpu))
}
}
#[cfg(test)]
mod tests {
use super::*;
// this test should be executed alone.
#[test]
fn test_process_usage() {
let mut stat = ProcessStat::cur_proc_stat().unwrap();
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage < 0.01);
let num = 1;
for _ in 0..num * 10 {
std::thread::spawn(move || {
loop {
let _ = (0..10_000_000).into_iter().sum::<u128>();
}
});
}
std::thread::sleep(std::time::Duration::from_secs(1));
let usage = stat.cpu_usage().unwrap();
assert!(usage > 0.9_f64)
}
}
| {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} | conditional_block |
cargo_workspace.rs | //! FIXME: write short doc here
use std::{
ops,
path::{Path, PathBuf},
};
use anyhow::{Context, Result};
use cargo_metadata::{BuildScript, CargoOpt, Message, MetadataCommand, PackageId};
use ra_arena::{Arena, Idx};
use ra_cargo_watch::run_cargo;
use ra_db::Edition;
use rustc_hash::FxHashMap;
use serde::Deserialize;
/// `CargoWorkspace` represents the logical structure of, well, a Cargo
/// workspace. It pretty closely mirrors `cargo metadata` output.
///
/// Note that internally, rust analyzer uses a different structure:
/// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates,
/// while this knows about `Packages` & `Targets`: purely cargo-related
/// concepts.
#[derive(Debug, Clone)]
pub struct CargoWorkspace {
packages: Arena<PackageData>,
targets: Arena<TargetData>,
workspace_root: PathBuf,
}
impl ops::Index<Package> for CargoWorkspace {
type Output = PackageData;
fn index(&self, index: Package) -> &PackageData {
&self.packages[index]
}
}
impl ops::Index<Target> for CargoWorkspace {
type Output = TargetData;
fn index(&self, index: Target) -> &TargetData {
&self.targets[index]
}
}
#[derive(Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase", default)]
pub struct CargoFeatures {
/// Do not activate the `default` feature.
pub no_default_features: bool,
/// Activate all available features
pub all_features: bool,
/// List of features to activate.
/// This will be ignored if `cargo_all_features` is true.
pub features: Vec<String>,
/// Runs cargo check on launch to figure out the correct values of OUT_DIR
pub load_out_dirs_from_check: bool,
}
impl Default for CargoFeatures {
fn default() -> Self {
CargoFeatures {
no_default_features: false,
all_features: true,
features: Vec::new(),
load_out_dirs_from_check: false,
}
}
}
pub type Package = Idx<PackageData>;
pub type Target = Idx<TargetData>;
#[derive(Debug, Clone)]
pub struct PackageData {
pub name: String,
pub manifest: PathBuf,
pub targets: Vec<Target>,
pub is_member: bool,
pub dependencies: Vec<PackageDependency>,
pub edition: Edition,
pub features: Vec<String>,
pub out_dir: Option<PathBuf>,
pub proc_macro_dylib_path: Option<PathBuf>,
}
#[derive(Debug, Clone)]
pub struct PackageDependency {
pub pkg: Package,
pub name: String,
}
#[derive(Debug, Clone)]
pub struct TargetData {
pub package: Package,
pub name: String,
pub root: PathBuf,
pub kind: TargetKind,
pub is_proc_macro: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TargetKind {
Bin,
/// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro,...).
Lib,
Example,
Test,
Bench,
Other,
}
impl TargetKind {
fn new(kinds: &[String]) -> TargetKind {
for kind in kinds {
return match kind.as_str() {
"bin" => TargetKind::Bin,
"test" => TargetKind::Test,
"bench" => TargetKind::Bench,
"example" => TargetKind::Example,
"proc-macro" => TargetKind::Lib,
_ if kind.contains("lib") => TargetKind::Lib,
_ => continue,
};
}
TargetKind::Other
}
}
impl PackageData {
pub fn root(&self) -> &Path {
self.manifest.parent().unwrap()
}
}
impl CargoWorkspace {
pub fn from_cargo_metadata(
cargo_toml: &Path,
cargo_features: &CargoFeatures,
) -> Result<CargoWorkspace> {
let mut meta = MetadataCommand::new();
meta.manifest_path(cargo_toml);
if cargo_features.all_features {
meta.features(CargoOpt::AllFeatures);
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
meta.features(CargoOpt::NoDefaultFeatures);
} else if!cargo_features.features.is_empty() {
meta.features(CargoOpt::SomeFeatures(cargo_features.features.clone()));
}
if let Some(parent) = cargo_toml.parent() {
meta.current_dir(parent);
}
let meta = meta.exec().with_context(|| {
format!("Failed to run `cargo metadata --manifest-path {}`", cargo_toml.display())
})?;
let mut out_dir_by_id = FxHashMap::default();
let mut proc_macro_dylib_paths = FxHashMap::default();
if cargo_features.load_out_dirs_from_check {
let resources = load_extern_resources(cargo_toml, cargo_features);
out_dir_by_id = resources.out_dirs;
proc_macro_dylib_paths = resources.proc_dylib_paths;
}
let mut pkg_by_id = FxHashMap::default();
let mut packages = Arena::default();
let mut targets = Arena::default();
let ws_members = &meta.workspace_members;
for meta_pkg in meta.packages {
let cargo_metadata::Package { id, edition, name, manifest_path,.. } = meta_pkg;
let is_member = ws_members.contains(&id);
let edition = edition
.parse::<Edition>()
.with_context(|| format!("Failed to parse edition {}", edition))?;
let pkg = packages.alloc(PackageData {
name,
manifest: manifest_path,
targets: Vec::new(),
is_member,
edition,
dependencies: Vec::new(),
features: Vec::new(),
out_dir: out_dir_by_id.get(&id).cloned(),
proc_macro_dylib_path: proc_macro_dylib_paths.get(&id).cloned(),
});
let pkg_data = &mut packages[pkg];
pkg_by_id.insert(id, pkg);
for meta_tgt in meta_pkg.targets {
let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"];
let tgt = targets.alloc(TargetData {
package: pkg,
name: meta_tgt.name,
root: meta_tgt.src_path.clone(),
kind: TargetKind::new(meta_tgt.kind.as_slice()),
is_proc_macro,
});
pkg_data.targets.push(tgt);
}
}
let resolve = meta.resolve.expect("metadata executed with deps");
for node in resolve.nodes {
let source = match pkg_by_id.get(&node.id) {
Some(&src) => src,
// FIXME: replace this and a similar branch below with `.unwrap`, once
// https://github.com/rust-lang/cargo/issues/7841
// is fixed and hits stable (around 1.43-is probably?).
None => {
log::error!("Node id do not match in cargo metadata, ignoring {}", node.id);
continue;
}
};
for dep_node in node.deps {
let pkg = match pkg_by_id.get(&dep_node.pkg) {
Some(&pkg) => pkg,
None => {
log::error!(
"Dep node id do not match in cargo metadata, ignoring {}",
dep_node.pkg
);
continue;
}
};
let dep = PackageDependency { name: dep_node.name, pkg };
packages[source].dependencies.push(dep);
}
packages[source].features.extend(node.features);
}
Ok(CargoWorkspace { packages, targets, workspace_root: meta.workspace_root })
}
pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a {
self.packages.iter().map(|(id, _pkg)| id)
}
pub fn target_by_root(&self, root: &Path) -> Option<Target> {
self.packages()
.filter_map(|pkg| self[pkg].targets.iter().find(|&&it| self[it].root == root))
.next() | }
}
#[derive(Debug, Clone, Default)]
pub struct ExternResources {
out_dirs: FxHashMap<PackageId, PathBuf>,
proc_dylib_paths: FxHashMap<PackageId, PathBuf>,
}
pub fn load_extern_resources(cargo_toml: &Path, cargo_features: &CargoFeatures) -> ExternResources {
let mut args: Vec<String> = vec![
"check".to_string(),
"--message-format=json".to_string(),
"--manifest-path".to_string(),
cargo_toml.display().to_string(),
];
if cargo_features.all_features {
args.push("--all-features".to_string());
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
args.push("--no-default-features".to_string());
} else {
args.extend(cargo_features.features.iter().cloned());
}
let mut acc = ExternResources::default();
let res = run_cargo(&args, cargo_toml.parent(), &mut |message| {
match message {
Message::BuildScriptExecuted(BuildScript { package_id, out_dir,.. }) => {
acc.out_dirs.insert(package_id, out_dir);
}
Message::CompilerArtifact(message) => {
if message.target.kind.contains(&"proc-macro".to_string()) {
let package_id = message.package_id;
if let Some(filename) = message.filenames.get(0) {
acc.proc_dylib_paths.insert(package_id, filename.clone());
}
}
}
Message::CompilerMessage(_) => (),
Message::Unknown => (),
}
true
});
if let Err(err) = res {
log::error!("Failed to load outdirs: {:?}", err);
}
acc
} | .copied()
}
pub fn workspace_root(&self) -> &Path {
&self.workspace_root | random_line_split |
cargo_workspace.rs | //! FIXME: write short doc here
use std::{
ops,
path::{Path, PathBuf},
};
use anyhow::{Context, Result};
use cargo_metadata::{BuildScript, CargoOpt, Message, MetadataCommand, PackageId};
use ra_arena::{Arena, Idx};
use ra_cargo_watch::run_cargo;
use ra_db::Edition;
use rustc_hash::FxHashMap;
use serde::Deserialize;
/// `CargoWorkspace` represents the logical structure of, well, a Cargo
/// workspace. It pretty closely mirrors `cargo metadata` output.
///
/// Note that internally, rust analyzer uses a different structure:
/// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates,
/// while this knows about `Packages` & `Targets`: purely cargo-related
/// concepts.
#[derive(Debug, Clone)]
pub struct CargoWorkspace {
packages: Arena<PackageData>,
targets: Arena<TargetData>,
workspace_root: PathBuf,
}
impl ops::Index<Package> for CargoWorkspace {
type Output = PackageData;
fn index(&self, index: Package) -> &PackageData {
&self.packages[index]
}
}
impl ops::Index<Target> for CargoWorkspace {
type Output = TargetData;
fn index(&self, index: Target) -> &TargetData {
&self.targets[index]
}
}
#[derive(Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase", default)]
pub struct | {
/// Do not activate the `default` feature.
pub no_default_features: bool,
/// Activate all available features
pub all_features: bool,
/// List of features to activate.
/// This will be ignored if `cargo_all_features` is true.
pub features: Vec<String>,
/// Runs cargo check on launch to figure out the correct values of OUT_DIR
pub load_out_dirs_from_check: bool,
}
impl Default for CargoFeatures {
fn default() -> Self {
CargoFeatures {
no_default_features: false,
all_features: true,
features: Vec::new(),
load_out_dirs_from_check: false,
}
}
}
pub type Package = Idx<PackageData>;
pub type Target = Idx<TargetData>;
#[derive(Debug, Clone)]
pub struct PackageData {
pub name: String,
pub manifest: PathBuf,
pub targets: Vec<Target>,
pub is_member: bool,
pub dependencies: Vec<PackageDependency>,
pub edition: Edition,
pub features: Vec<String>,
pub out_dir: Option<PathBuf>,
pub proc_macro_dylib_path: Option<PathBuf>,
}
#[derive(Debug, Clone)]
pub struct PackageDependency {
pub pkg: Package,
pub name: String,
}
#[derive(Debug, Clone)]
pub struct TargetData {
pub package: Package,
pub name: String,
pub root: PathBuf,
pub kind: TargetKind,
pub is_proc_macro: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TargetKind {
Bin,
/// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro,...).
Lib,
Example,
Test,
Bench,
Other,
}
impl TargetKind {
fn new(kinds: &[String]) -> TargetKind {
for kind in kinds {
return match kind.as_str() {
"bin" => TargetKind::Bin,
"test" => TargetKind::Test,
"bench" => TargetKind::Bench,
"example" => TargetKind::Example,
"proc-macro" => TargetKind::Lib,
_ if kind.contains("lib") => TargetKind::Lib,
_ => continue,
};
}
TargetKind::Other
}
}
impl PackageData {
pub fn root(&self) -> &Path {
self.manifest.parent().unwrap()
}
}
impl CargoWorkspace {
pub fn from_cargo_metadata(
cargo_toml: &Path,
cargo_features: &CargoFeatures,
) -> Result<CargoWorkspace> {
let mut meta = MetadataCommand::new();
meta.manifest_path(cargo_toml);
if cargo_features.all_features {
meta.features(CargoOpt::AllFeatures);
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
meta.features(CargoOpt::NoDefaultFeatures);
} else if!cargo_features.features.is_empty() {
meta.features(CargoOpt::SomeFeatures(cargo_features.features.clone()));
}
if let Some(parent) = cargo_toml.parent() {
meta.current_dir(parent);
}
let meta = meta.exec().with_context(|| {
format!("Failed to run `cargo metadata --manifest-path {}`", cargo_toml.display())
})?;
let mut out_dir_by_id = FxHashMap::default();
let mut proc_macro_dylib_paths = FxHashMap::default();
if cargo_features.load_out_dirs_from_check {
let resources = load_extern_resources(cargo_toml, cargo_features);
out_dir_by_id = resources.out_dirs;
proc_macro_dylib_paths = resources.proc_dylib_paths;
}
let mut pkg_by_id = FxHashMap::default();
let mut packages = Arena::default();
let mut targets = Arena::default();
let ws_members = &meta.workspace_members;
for meta_pkg in meta.packages {
let cargo_metadata::Package { id, edition, name, manifest_path,.. } = meta_pkg;
let is_member = ws_members.contains(&id);
let edition = edition
.parse::<Edition>()
.with_context(|| format!("Failed to parse edition {}", edition))?;
let pkg = packages.alloc(PackageData {
name,
manifest: manifest_path,
targets: Vec::new(),
is_member,
edition,
dependencies: Vec::new(),
features: Vec::new(),
out_dir: out_dir_by_id.get(&id).cloned(),
proc_macro_dylib_path: proc_macro_dylib_paths.get(&id).cloned(),
});
let pkg_data = &mut packages[pkg];
pkg_by_id.insert(id, pkg);
for meta_tgt in meta_pkg.targets {
let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"];
let tgt = targets.alloc(TargetData {
package: pkg,
name: meta_tgt.name,
root: meta_tgt.src_path.clone(),
kind: TargetKind::new(meta_tgt.kind.as_slice()),
is_proc_macro,
});
pkg_data.targets.push(tgt);
}
}
let resolve = meta.resolve.expect("metadata executed with deps");
for node in resolve.nodes {
let source = match pkg_by_id.get(&node.id) {
Some(&src) => src,
// FIXME: replace this and a similar branch below with `.unwrap`, once
// https://github.com/rust-lang/cargo/issues/7841
// is fixed and hits stable (around 1.43-is probably?).
None => {
log::error!("Node id do not match in cargo metadata, ignoring {}", node.id);
continue;
}
};
for dep_node in node.deps {
let pkg = match pkg_by_id.get(&dep_node.pkg) {
Some(&pkg) => pkg,
None => {
log::error!(
"Dep node id do not match in cargo metadata, ignoring {}",
dep_node.pkg
);
continue;
}
};
let dep = PackageDependency { name: dep_node.name, pkg };
packages[source].dependencies.push(dep);
}
packages[source].features.extend(node.features);
}
Ok(CargoWorkspace { packages, targets, workspace_root: meta.workspace_root })
}
pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a {
self.packages.iter().map(|(id, _pkg)| id)
}
pub fn target_by_root(&self, root: &Path) -> Option<Target> {
self.packages()
.filter_map(|pkg| self[pkg].targets.iter().find(|&&it| self[it].root == root))
.next()
.copied()
}
pub fn workspace_root(&self) -> &Path {
&self.workspace_root
}
}
#[derive(Debug, Clone, Default)]
pub struct ExternResources {
out_dirs: FxHashMap<PackageId, PathBuf>,
proc_dylib_paths: FxHashMap<PackageId, PathBuf>,
}
/// Runs `cargo check --message-format=json` against `cargo_toml` and
/// collects external resources from the emitted JSON messages:
/// build-script `OUT_DIR`s and proc-macro dylib paths.
///
/// Cargo failures are logged and swallowed — callers always receive a
/// (possibly empty) `ExternResources`.
pub fn load_extern_resources(cargo_toml: &Path, cargo_features: &CargoFeatures) -> ExternResources {
    let mut args: Vec<String> = vec![
        "check".to_string(),
        "--message-format=json".to_string(),
        "--manifest-path".to_string(),
        cargo_toml.display().to_string(),
    ];
    if cargo_features.all_features {
        args.push("--all-features".to_string());
    } else if cargo_features.no_default_features {
        // FIXME: `NoDefaultFeatures` is mutually exclusive with `SomeFeatures`
        // https://github.com/oli-obk/cargo_metadata/issues/79
        args.push("--no-default-features".to_string());
    } else if !cargo_features.features.is_empty() {
        // Bug fix: feature names must be passed behind a `--features` flag.
        // Previously they were appended as bare positional arguments,
        // which `cargo check` does not accept.
        args.push("--features".to_string());
        args.push(cargo_features.features.join(" "));
    }
    let mut acc = ExternResources::default();
    let res = run_cargo(&args, cargo_toml.parent(), &mut |message| {
        match message {
            // A build script ran: remember the OUT_DIR it produced.
            Message::BuildScriptExecuted(BuildScript { package_id, out_dir, .. }) => {
                acc.out_dirs.insert(package_id, out_dir);
            }
            // A crate finished compiling: if it is a proc-macro, record
            // the path of the first artifact (the dylib rustc emitted).
            Message::CompilerArtifact(message) => {
                if message.target.kind.contains(&"proc-macro".to_string()) {
                    let package_id = message.package_id;
                    if let Some(filename) = message.filenames.get(0) {
                        acc.proc_dylib_paths.insert(package_id, filename.clone());
                    }
                }
            }
            Message::CompilerMessage(_) => (),
            Message::Unknown => (),
        }
        true // keep consuming cargo's message stream
    });
    if let Err(err) = res {
        log::error!("Failed to load outdirs: {:?}", err);
    }
    acc
}
| CargoFeatures | identifier_name |
main.rs | use std::env;
use tokio::stream::StreamExt;
use twilight::{
cache::{
twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
/// Bot entry point: connects the gateway cluster, configures a
/// message/reaction-only cache, and runs the event loop forever.
#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    let token = env::var("DISCORD_TOKEN")?;
    // This is also the default.
    let scheme = ShardScheme::Auto;
    let config = ClusterConfig::builder(&token)
        .shard_scheme(scheme)
        // Use intents to only receive the events we actually handle:
        // guild messages, DMs, and guild message reactions.
        .intents(Some(
            GatewayIntents::GUILD_MESSAGES
                | GatewayIntents::DIRECT_MESSAGES
                | GatewayIntents::GUILD_MESSAGE_REACTIONS,
        ))
        .build();
    // Start up the cluster
    let cluster = Cluster::new(config);
    cluster.up().await?;
    // The http client is separate from the gateway, so start up a new one.
    let http = HttpClient::new(&token);
    // Since we only care about messages and reactions, make
    // the cache only cache message and reaction related events.
    let cache_config = InMemoryConfigBuilder::new()
        .event_types(
            EventType::MESSAGE_CREATE
                | EventType::MESSAGE_DELETE
                | EventType::MESSAGE_DELETE_BULK
                | EventType::MESSAGE_UPDATE
                | EventType::REACTION_ADD
                | EventType::REACTION_REMOVE,
        )
        .build();
    let cache = InMemoryCache::from(cache_config);
    let mut events = cluster.events().await;
    let current_user = http.current_user().await?;
    // Event loop: update the cache, then handle the event inline.
    // (Note: events are handled sequentially, not spawned — the old
    // "spawn a new task" comment was inaccurate.)
    while let Some(event) = events.next().await {
        cache.update(&event.1).await.expect("Cache failed, OhNoe!");
        // Fixed mojibake: the argument was corrupted to `¤t_user`
        // (HTML-entity damage of `&current_user`).
        handle_event(event, http.clone(), &current_user).await?;
    }
    Ok(())
}
/// Checks if the specified channel is a private message channel.
///
/// Unknown channels (the HTTP lookup resolved to `None`) are treated as
/// not-private instead of panicking, so a stale or deleted channel id
/// can no longer crash the bot. HTTP failures still propagate as `Err`.
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
    // Previously this `unwrap()`ed the Option returned by the lookup.
    Ok(matches!(http.channel(channel_id).await?, Some(Channel::Private(_))))
}
/// Routes a single gateway event to the matching handler.
///
/// Message creates are split into PM handling (theme submissions) vs.
/// guild command handling; reactions are only processed in guilds.
/// Fixed mojibake: `¤t_user` (HTML-entity damage of `&current_user`).
async fn handle_event(
    event: (u64, Event),
    http: HttpClient,
    current_user: &CurrentUser
) -> Result<()> {
    match event {
        (_, Event::MessageCreate(msg)) => {
            // Don't send replies to yourself.
            if msg.author.id != current_user.id {
                if is_pm(&http, msg.channel_id).await? {
                    handle_pm(&msg, &http).await?;
                } else {
                    handle_potential_command(&msg, http, current_user).await?;
                }
            }
        }
        (_, Event::ReactionAdd(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await? {
                handle_reaction_add(&reaction, http, &current_user).await?;
            }
        }
        (_, Event::ReactionRemove(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await? {
                handle_reaction_remove(&reaction, http).await?;
            }
        }
        (id, Event::ShardConnected(_)) => {
            println!("Connected on shard {}", id);
        }
        _ => {}
    }
    Ok(())
}
/// Handles a private message: every PM is treated as a theme-idea
/// submission and forwarded to the theme module.
async fn handle_pm(msg: &Message, http: &HttpClient) -> Result<()> {
    handle_add_theme(http, msg).await?;
    Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => |
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
/// Sends the command overview to `user_id` in `channel_id`.
///
/// Everyone gets the standard command list; holders of the ORGANIZER
/// role additionally get the organizer-only command list appended.
async fn send_help_message(
    http: HttpClient,
    channel_id: ChannelId,
    user_id: UserId,
    guild_id: GuildId,
) -> Result<()> {
    // Commands available to everybody. (The "send me a PM to submit
    // theme ideas" line is intentionally left out of the help text.)
    let mut help_message = String::from(
        "Get a role to signify one of your skill sets with the command `!role <role name>`\n\
         and leave a role with `!leave <role name>`.\n\n\
         You can also ask for text and voice channels for your game \
         with the command `!createchannels <game name>`\n\
         and rename them with `!renamechannels <new game name>`.",
    );
    // Organizer-only section, appended on demand.
    if has_role(&http, guild_id, user_id, ORGANIZER).await? {
        help_message.push_str("\n\n");
        help_message.push_str(&format!(
            "Since you have the **{}** role, you also have access to the \
             following commands:\n\
             - `!generatetheme` to generate a theme.\n\
             - `!showallthemes` to view all the theme ideas that have been submitted.\n\
             - `!showthemecount` to see the number of theme ideas that have been submitted.\n\
             - `!removechannels <mention of user>` to remove a user's created channel.\n\
             - `!clearassociations` to clear all user–channel associations.\n\
             - `!setroleassign <mention of channel with the message> <message ID>` to \
             set the server's role assignment message.",
            ORGANIZER
        ));
    }
    send_message(&http, channel_id, user_id, help_message).await?;
    Ok(())
}
| {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
} | conditional_block |
main.rs | use std::env;
use tokio::stream::StreamExt;
use twilight::{
cache::{
twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is seperate from the gateway,
// so startup a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Startup an event loop for each event in the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
}
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id!= current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if!is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if!is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
/// Dispatches a guild message to the matching `!command` handler.
///
/// Unknown `!commands` get an error reply plus the help text; messages
/// that are not commands only get a reply if the bot is mentioned.
///
/// NOTE: most arms `expect()` a guild id, so this must only be called
/// for non-PM messages (see `handle_event`).
///
/// Fixes: idiomatic `s.starts_with('!')` instead of comparing
/// `s.chars().next()`; repaired mojibake `¤t_user.id`.
async fn handle_potential_command(
    msg: &Message,
    http: HttpClient,
    current_user: &CurrentUser
) -> Result<()> {
    let mut words = msg.content.split_ascii_whitespace();
    match words.next() {
        Some("!help") => {
            send_help_message(
                http,
                msg.channel_id,
                msg.author.id,
                msg.guild_id.expect("Tried to call for help in non-guild"),
            ).await?;
        }
        Some("!createchannels") => {
            handle_create_channels(
                &words.collect::<Vec<_>>(),
                msg.channel_id,
                msg.guild_id.expect("Tried to create channels in non-guild"),
                msg.author.id,
                current_user.id,
                http
            ).await?;
        },
        Some("!renamechannels") => {
            handle_rename_channels(
                &words.collect::<Vec<_>>(),
                msg.channel_id,
                msg.author.id,
                current_user.id,
                http
            ).await?;
        },
        Some("!removechannels") => {
            handle_remove_channels(
                &words.collect::<Vec<_>>(),
                msg.channel_id,
                msg.guild_id.expect("Tried to remove channels in non-guild"),
                msg.author.id,
                http
            ).await?;
        },
        Some("!clearassociations") => {
            handle_clear_channel_associations(
                msg.channel_id,
                msg.guild_id.expect("Tried to clear channel associations in non-guild"),
                msg.author.id,
                http,
            ).await?;
        }
        Some("!role") => {
            handle_give_role(
                &words.collect::<Vec<_>>(),
                msg.channel_id,
                msg.guild_id.expect("Tried to get role in non-guild"),
                &msg.author,
                http
            ).await?;
        },
        Some("!leave") => {
            handle_remove_role(
                &words.collect::<Vec<_>>(),
                msg.channel_id,
                msg.guild_id.expect("Tried to leave role in non-guild"),
                &msg.author,
                http
            ).await?;
        },
        Some("!generatetheme") => {
            handle_generate_theme(
                msg.channel_id,
                msg.guild_id.expect("Tried to generate theme in non-guild"),
                &msg.author,
                http
            ).await?;
        }
        Some("!showallthemes") => {
            handle_show_all_themes(
                msg.channel_id,
                msg.guild_id.expect("Tried to show all themes in non-guild"),
                &msg.author,
                http
            ).await?;
        }
        Some("!showthemecount") => {
            handle_show_theme_count(
                msg.channel_id,
                msg.guild_id.expect("Tried to show theme idea count in non-guild"),
                &msg.author,
                http
            ).await?;
        }
        Some("!setroleassign") => {
            handle_set_reaction_message(
                &words.collect::<Vec<_>>(),
                msg.channel_id,
                msg.guild_id.expect("Tried to set role assignment message in non-guild"),
                &msg.author,
                http,
                msg,
                ReactionMessageType::RoleAssign,
            ).await?;
        }
        // Looks like a command, but matched nothing above.
        Some(s) if s.starts_with('!') => {
            send_message(&http, msg.channel_id, msg.author.id,
                format!("Unrecognised command `{}`.", s)
            ).await?;
            send_help_message(
                http,
                msg.channel_id,
                msg.author.id,
                msg.guild_id.expect("Tried to issue a command in non-guild"),
            ).await?;
        }
        // Not a command and probably not for us.
        Some(_) => {
            // Reply with help only if we were mentioned.
            if msg.mentions.contains_key(&current_user.id) {
                send_help_message(
                    http,
                    msg.channel_id,
                    msg.author.id,
                    msg.guild_id.expect("Tried to mention us in non-guild"),
                ).await?;
            }
        }
        None => {}
    }
    Ok(())
}
async fn | (
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
);
let help_message =
if has_role(&http, guild_id, user_id, ORGANIZER).await? {
format!("{}\n\n{}", standard_message, organizer_message)
}
else {
standard_message.to_string()
};
send_message(&http, channel_id, user_id, help_message).await?;
Ok(())
}
| send_help_message | identifier_name |
main.rs | use std::env;
use tokio::stream::StreamExt;
use twilight::{
cache::{
twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is seperate from the gateway,
// so startup a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Startup an event loop for each event in the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> |
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id!= current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if!is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if!is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
}
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
async fn send_help_message(
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
);
let help_message =
if has_role(&http, guild_id, user_id, ORGANIZER).await? {
format!("{}\n\n{}", standard_message, organizer_message)
}
else {
standard_message.to_string()
};
send_message(&http, channel_id, user_id, help_message).await?;
Ok(())
}
| {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
} | identifier_body |
main.rs | use std::env;
use tokio::stream::StreamExt;
use twilight::{
cache::{
twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
InMemoryCache,
},
gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
gateway::shard::Event,
http::Client as HttpClient,
model::{
channel::{Channel, Message},
gateway::GatewayIntents,
id::{ChannelId, GuildId, UserId},
user::CurrentUser,
},
};
mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;
use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};
#[tokio::main]
async fn main() -> Result<()> {
dotenv::dotenv().ok();
let token = env::var("DISCORD_TOKEN")?;
// This is also the default.
let scheme = ShardScheme::Auto;
let config = ClusterConfig::builder(&token)
.shard_scheme(scheme)
// Use intents to only listen to GUILD_MESSAGES events
.intents(Some(
GatewayIntents::GUILD_MESSAGES
| GatewayIntents::DIRECT_MESSAGES
| GatewayIntents::GUILD_MESSAGE_REACTIONS,
))
.build();
// Start up the cluster
let cluster = Cluster::new(config);
cluster.up().await?;
// The http client is seperate from the gateway,
// so startup a new one
let http = HttpClient::new(&token);
// Since we only care about messages and reactions, make
// the cache only cache message and reaction related events
let cache_config = InMemoryConfigBuilder::new()
.event_types(
EventType::MESSAGE_CREATE
| EventType::MESSAGE_DELETE
| EventType::MESSAGE_DELETE_BULK
| EventType::MESSAGE_UPDATE
| EventType::REACTION_ADD
| EventType::REACTION_REMOVE,
)
.build();
let cache = InMemoryCache::from(cache_config);
let mut events = cluster.events().await;
let current_user = http.current_user().await?;
// Startup an event loop for each event in the event stream
while let Some(event) = events.next().await {
// Update the cache
cache.update(&event.1).await.expect("Cache failed, OhNoe!");
// Spawn a new task to handle the event
handle_event(event, http.clone(), ¤t_user).await?;
}
Ok(())
}
/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
match http.channel(channel_id).await?.unwrap() {
Channel::Private(_) => Ok(true),
_ => Ok(false)
}
}
async fn handle_event(
event: (u64, Event),
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
match event {
(_, Event::MessageCreate(msg)) => {
// Don't send replies to yourself
if msg.author.id!= current_user.id {
if is_pm(&http, msg.channel_id).await? {
handle_pm(&msg, &http).await?;
}
else {
handle_potential_command(&msg, http, current_user)
.await?;
}
}
}
(_, Event::ReactionAdd(reaction)) => {
if!is_pm(&http, reaction.channel_id).await? {
handle_reaction_add(&reaction, http, ¤t_user).await?;
}
}
(_, Event::ReactionRemove(reaction)) => {
if!is_pm(&http, reaction.channel_id).await? {
handle_reaction_remove(&reaction, http).await?;
}
}
(id, Event::ShardConnected(_)) => {
println!("Connected on shard {}", id);
}
_ => {}
}
Ok(())
}
async fn handle_pm(
msg: &Message,
http: &HttpClient,
) -> Result<()> {
handle_add_theme(http, msg).await?;
Ok(())
}
async fn handle_potential_command(
msg: &Message,
http: HttpClient,
current_user: &CurrentUser
) -> Result<()> {
let mut words = msg.content.split_ascii_whitespace();
match words.next() {
Some("!help") => {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to call for help in non-guild"),
).await?;
}
Some("!createchannels") => {
handle_create_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to create channels in non-guild"),
msg.author.id,
current_user.id,
http
).await?;
},
Some("!renamechannels") => {
handle_rename_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.author.id,
current_user.id,
http
).await?;
},
Some("!removechannels") => {
handle_remove_channels(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to remove channels in non-guild"),
msg.author.id,
http
).await?;
},
Some("!clearassociations") => {
handle_clear_channel_associations(
msg.channel_id,
msg.guild_id.expect("Tried to clear channel associations in non-guild"),
msg.author.id,
http,
).await?;
}
Some("!role") => {
handle_give_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to get role in non-guild"),
&msg.author,
http
).await?;
},
Some("!leave") => {
handle_remove_role(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to leave role in non-guild"),
&msg.author,
http
).await?;
},
Some("!generatetheme") => {
handle_generate_theme(
msg.channel_id,
msg.guild_id.expect("Tried to generate theme in non-guild"),
&msg.author,
http
).await?;
}
Some("!showallthemes") => {
handle_show_all_themes(
msg.channel_id,
msg.guild_id.expect("Tried to show all themes in non-guild"),
&msg.author,
http
).await?;
}
Some("!showthemecount") => {
handle_show_theme_count(
msg.channel_id,
msg.guild_id.expect("Tried to show theme idea count in non-guild"),
&msg.author,
http
).await?;
}
Some("!setroleassign") => {
handle_set_reaction_message(
&words.collect::<Vec<_>>(),
msg.channel_id,
msg.guild_id.expect("Tried to set role assignment message in non-guild"),
&msg.author,
http,
msg,
ReactionMessageType::RoleAssign,
).await?;
}
Some(s) if s.chars().next() == Some('!') => {
send_message(&http, msg.channel_id, msg.author.id,
format!("Unrecognised command `{}`.", s)
).await?;
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to issue a command in non-guild"),
).await?;
}
// Not a command and probably not for us
Some(_) => {
// Check if we were mentioned
if msg.mentions.contains_key(¤t_user.id) {
send_help_message(
http,
msg.channel_id,
msg.author.id,
msg.guild_id.expect("Tried to mention us in non-guild"),
).await?;
}
}
None => {}
}
Ok(())
}
async fn send_help_message(
http: HttpClient,
channel_id: ChannelId,
user_id: UserId,
guild_id: GuildId,
) -> Result<()> {
let standard_message =
//"Send me a PM to submit theme ideas.\n\n\
"Get a role to signify one of your skill sets with the command `!role <role name>`\n\
and leave a role with `!leave <role name>`.\n\n\
You can also ask for text and voice channels for your game \
with the command `!createchannels <game name>`\n\
and rename them with `!renamechannels <new game name>`.";
let organizer_message = format!(
"Since you have the **{}** role, you also have access to the \
following commands:\n\
- `!generatetheme` to generate a theme.\n\
- `!showallthemes` to view all the theme ideas that have been submitted.\n\
- `!showthemecount` to see the number of theme ideas that have been submitted.\n\
- `!removechannels <mention of user>` to remove a user's created channel.\n\
- `!clearassociations` to clear all user–channel associations.\n\
- `!setroleassign <mention of channel with the message> <message ID>` to \
set the server's role assignment message.", ORGANIZER
);
let help_message = | };
send_message(&http, channel_id, user_id, help_message).await?;
Ok(())
} | if has_role(&http, guild_id, user_id, ORGANIZER).await? {
format!("{}\n\n{}", standard_message, organizer_message)
}
else {
standard_message.to_string() | random_line_split |
main.rs | use futures::future::join_all;
use generational_arena::{Arena, Index};
use nalgebra::Vector2;
use std::env::args;
use std::error::Error;
use std::net::SocketAddr;
use std::num::Wrapping;
use std::time::Duration;
use std::time::SystemTime;
use tokio::net::{TcpListener, TcpStream, UdpSocket};
use tokio::prelude::*;
use tokio::sync::mpsc::{channel, Sender};
use tokio::time::interval;
use game;
// Server-side state for one connected client.
#[derive(Debug)]
struct Player {
// Shared simulation state (id, radius, position, velocity) sent to clients.
player: game::Player,
// Outgoing TCP messages for this client (consumed by its writer task).
tcp_tx: Sender<game::TcpClientMessage>,
// Handshake nonce — presumably the client echoes it over UDP to bind
// its address; TODO confirm against the UDP receive path.
random_bytes: [u8; game::NUM_RANDOM_BYTES],
// None until the client's UDP endpoint is learned.
udp_addr: Option<SocketAddr>,
// Latest movement input vector from the client.
input: Vector2<f64>,
// Aim angle — units not evident here (radians?); verify.
angle: f64,
// Whether the client is currently holding fire.
firing: bool,
// Accumulator used for fire-rate pacing — TODO confirm semantics.
fire_counter: f64,
}
// Server-side state for one live projectile.
#[derive(Debug)]
struct Bullet {
// Shared simulation state sent to clients.
bullet: game::Bullet,
// Velocity applied to the bullet each tick — TODO confirm in update loop.
velocity: Vector2<f64>,
// Lifetime value — whether elapsed or remaining is not visible here; verify.
lifetime: f64,
}
fn accept(
players: &mut Arena<Player>,
bullets: &Arena<Bullet>,
stream: TcpStream,
mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>,
tick_rate: u32,
tick_zero: SystemTime,
tick: game::Tick,
) {
println!("connection!");
let (tx, mut rx) = channel(4);
let idx = match players.try_insert(Player {
player: game::Player {
id: 0, // Set below.
radius: 1.0,
position: Vector2::new(0.0, 0.0),
velocity: Vector2::new(0.0, 0.0),
},
tcp_tx: tx,
udp_addr: None,
random_bytes: rand::random(),
input: Vector2::new(0.0, 0.0),
angle: 0.0,
firing: false,
fire_counter: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
println!("rejecting connection; too many players");
return;
}
}; | // Set the user ID to some combinaiton of the arena index and generation.
let id = idx.into_raw_parts().0 as u8;
players[idx].player.id = id;
// TODO(jack) Broadcast PlayerLeft messages.
// Broadcast PlayerJoined messages.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx!= idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
let msg = game::TcpClientMessage::PlayerJoined(id);
tokio::spawn(async move {
let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
join_all(join_handles).await;
});
// Start tasks to read-from / write-to the TCP socket.
let (mut reader, mut writer) = stream.into_split();
tokio::spawn(async move {
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
let num_bytes = match reader.read(&mut buf).await {
Ok(0) => break,
Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
Err(err) => {
eprintln!("{}", err);
break;
}
Ok(num_bytes) => num_bytes,
};
match bincode::deserialize(&buf[..num_bytes]) {
Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
Ok(_) => (),
Err(_) => break,
},
Err(err) => {
eprintln!("{}", err);
break;
}
};
}
// One consequence of every client publishing TCP packets to the same channel
// is that we don't know when any one disconnects.
// We signal it here with a `None`.
internal_tcp_tx.send((idx, None)).await.ok();
});
let random_bytes = players[idx].random_bytes.clone();
let update = game::WorldUpdate {
tick,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
tokio::spawn(async move {
// Send the init packet.
// For now, this will just include a random sequence of bytes.
// We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
id,
random_bytes,
update,
tick_rate: tick_rate as u8,
tick_zero,
}))
.unwrap();
if let Err(err) = writer.write_all(&bytes[..]).await {
eprintln!("{}", err);
return;
}
println!("wrote init message");
loop {
match rx.recv().await {
Some(msg) => {
if let Err(_) = writer
.write_all(bincode::serialize(&msg).unwrap().as_slice())
.await
{
break;
}
}
None => break,
};
}
});
}
fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) {
// Apply player impulse.
for (_, player) in players.iter_mut() {
let acceleration = 64.0;
let max_velocity = 16.0;
let friction = 16.0;
// Acceleration ranges from `friction` to `friction + acceleration`,
// and is inversely proportional to the projection of the current velocity onto the input vector.
let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
let acceleration_index = if acceleration_index < 0.0 {
0.0
} else {
acceleration_index.sqrt()
};
let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
player.player.velocity += adjusted_acceleration * dt * player.input;
let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
let dampened_velocity = if dampened_velocity_unclamped < 0.0 {
0.0
} else {
dampened_velocity_unclamped
};
let velocity_unit = player
.player
.velocity
.try_normalize(0.0)
.unwrap_or(Vector2::new(0.0, 0.0));
player.player.velocity = dampened_velocity * velocity_unit;
player.player.position += dt * player.player.velocity;
}
// Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets
.iter()
.filter(|(_, b)| b.lifetime > 1.0)
.map(|(idx, _)| idx)
.collect();
for idx in bullets_to_remove.iter() {
bullets.remove(*idx);
}
// Fire bullets.
for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
let rof = 30.0;
player.fire_counter += rof * dt;
if player.fire_counter >= 1.0 {
player.fire_counter %= 1.0;
let idx = match bullets.try_insert(Bullet {
bullet: game::Bullet {
id: 0,
player_id: player.player.id,
position: player.player.position,
angle: player.angle,
radius: 0.5,
},
velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
lifetime: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
eprintln!("too many bullets!");
break;
}
};
// Set the user ID to the arena index.
let raw_parts = idx.into_raw_parts();
bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16;
}
}
// Update bullets.
for (_, bullet) in bullets.iter_mut() {
bullet.bullet.position += dt * bullet.velocity;
bullet.lifetime += dt;
}
// Manage collisions.
// We have to collect the idxs to avoid borrowing `players`.
let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
let idx_pairs = idxs
.iter()
.map(|a| idxs.iter().map(move |b| (a, b)))
.flatten()
.filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
for (a, b) in idx_pairs {
let (a, b) = players.get2_mut(*a, *b);
let a = a.unwrap();
let b = b.unwrap();
let distance = match a.player.position - b.player.position {
v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
v => v,
};
let max_distance = a.player.radius + b.player.radius;
if distance.magnitude_squared() >= max_distance.powi(2) {
continue; // No collision.
}
let displacement_unit = distance.try_normalize(0.0).unwrap();
let displacement = displacement_unit * (max_distance - distance.magnitude());
a.player.position += 0.5 * displacement;
b.player.position += -0.5 * displacement;
let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
let elasticity = 2.0;
a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) {
Some(port) => port,
None => {
eprintln!("Usage: {} PORT", args().nth(0).unwrap());
return Ok(());
}
};
let mut players = Arena::with_capacity(16);
let mut bullets = Arena::with_capacity(1024);
let tick_rate = 60;
let mut ticker = interval(Duration::from_secs(1) / tick_rate);
let snapshot_rate = 10;
let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate);
let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?;
let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?;
// tcp_rx is our global receiver for TCP events.
// This means that each player holds a copy of tcp_tx which packets are passed to.
let (tcp_tx, mut tcp_rx) = channel(4);
let mut tick = Wrapping(0);
let tick_zero = SystemTime::now();
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
// TODO(jack) Redesign this select! call to execute as little code linearly as possible.
tokio::select! {
_ = ticker.tick() => {
// Update game state.
let dt = 1.0 / tick_rate as f64; // TODO(jack) Measure actual elapsed time.
step(&mut players, &mut bullets, dt);
tick = tick + Wrapping(1);
},
_ = snapshot_ticker.tick() => {
// Broadcast.
let update = game::WorldUpdate {
tick: tick.0,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
let bytes = bincode::serialize(&game::UdpClientMessage::WorldUpdate(update)).unwrap();
for (_, player) in players.iter().filter(|(_, p)| p.udp_addr.is_some()) {
udp_socket.send_to(&bytes, player.udp_addr.unwrap()).await?;
}
},
accept_result = tcp_listener.accept() => match accept_result {
Ok((stream, _)) => accept(&mut players, &bullets, stream, tcp_tx.clone(), tick_rate, tick_zero, tick.0),
Err(err) => {
eprintln!("{}", err);
break
},
},
// TODO(jack) TCP messages from the client should end up in a channel.
result = tcp_rx.recv() => match result {
Some((idx, None)) => {
println!("disconnection!");
let id = players[idx].player.id;
// Broadcast that a player left.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx!= idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
tokio::spawn(async move {
let msg = game::TcpClientMessage::PlayerLeft(id);
let join_handles = tcp_txs
.iter_mut()
.map(|tcp_tx| tcp_tx.send(msg.clone())); // TODO(jack) Can we do this without allocations?
join_all(join_handles).await;
});
players.remove(idx);
},
Some((idx, Some(msg))) => println!("{:?}: {:?}", idx, msg),
None => break,
},
result = udp_socket.recv_from(&mut buf) => match result {
Ok((0, _)) => break,
Ok((MAX_PACKET_SIZE_PLUS_ONE, _)) => break,
Ok((num_bytes, socket_addr)) => {
let bytes = &buf[..num_bytes];
let msg: game::UdpServerMessage = match bincode::deserialize(&bytes) {
Ok(msg) => msg,
Err(err) => {
eprintln!("{}", err);
continue
},
};
match msg {
game::UdpServerMessage::Init(game::ServerInit { random_bytes }) => {
println!("received init message: {:?}", random_bytes);
if let Some((_, player)) = players.iter_mut().find(|(_, player)| player.random_bytes == random_bytes) {
player.udp_addr = Some(socket_addr);
println!("{:?}", player.udp_addr);
}
},
game::UdpServerMessage::PlayerInput(inputs) => {
let (_, player) = match players.iter_mut().find(|(_, player)| player.udp_addr.is_some() && player.udp_addr.unwrap() == socket_addr) {
Some((idx, player)) => (idx, player),
None => continue,
};
// TODO(jack) Apply the inputs according to their tick.
// Right now, we're just taking the most recent one.
if inputs.len() == 0 {
continue
}
let input = inputs.iter().last().unwrap();
player.input = Vector2::new(
(input.right as i32 - input.left as i32) as f64,
(input.down as i32 - input.up as i32) as f64,
).try_normalize(0.0).unwrap_or(Vector2::new(0.0, 0.0));
player.angle = input.angle;
// TODO(jack) We probably just want to compose input in the player struct.
if input.mouse_left {
player.firing = true;
} else {
player.firing = false;
player.fire_counter = 0.0;
}
},
};
},
Err(err) => {
eprintln!("{}", err);
break
},
}
}
}
Ok(())
} | random_line_split |
|
main.rs | use futures::future::join_all;
use generational_arena::{Arena, Index};
use nalgebra::Vector2;
use std::env::args;
use std::error::Error;
use std::net::SocketAddr;
use std::num::Wrapping;
use std::time::Duration;
use std::time::SystemTime;
use tokio::net::{TcpListener, TcpStream, UdpSocket};
use tokio::prelude::*;
use tokio::sync::mpsc::{channel, Sender};
use tokio::time::interval;
use game;
#[derive(Debug)]
struct Player {
player: game::Player,
tcp_tx: Sender<game::TcpClientMessage>,
random_bytes: [u8; game::NUM_RANDOM_BYTES],
udp_addr: Option<SocketAddr>,
input: Vector2<f64>,
angle: f64,
firing: bool,
fire_counter: f64,
}
#[derive(Debug)]
struct Bullet {
bullet: game::Bullet,
velocity: Vector2<f64>,
lifetime: f64,
}
fn accept(
players: &mut Arena<Player>,
bullets: &Arena<Bullet>,
stream: TcpStream,
mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>,
tick_rate: u32,
tick_zero: SystemTime,
tick: game::Tick,
) {
println!("connection!");
let (tx, mut rx) = channel(4);
let idx = match players.try_insert(Player {
player: game::Player {
id: 0, // Set below.
radius: 1.0,
position: Vector2::new(0.0, 0.0),
velocity: Vector2::new(0.0, 0.0),
},
tcp_tx: tx,
udp_addr: None,
random_bytes: rand::random(),
input: Vector2::new(0.0, 0.0),
angle: 0.0,
firing: false,
fire_counter: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
println!("rejecting connection; too many players");
return;
}
};
// Set the user ID to some combinaiton of the arena index and generation.
let id = idx.into_raw_parts().0 as u8;
players[idx].player.id = id;
// TODO(jack) Broadcast PlayerLeft messages.
// Broadcast PlayerJoined messages.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx!= idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
let msg = game::TcpClientMessage::PlayerJoined(id);
tokio::spawn(async move {
let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
join_all(join_handles).await;
});
// Start tasks to read-from / write-to the TCP socket.
let (mut reader, mut writer) = stream.into_split();
tokio::spawn(async move {
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
let num_bytes = match reader.read(&mut buf).await {
Ok(0) => break,
Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
Err(err) => {
eprintln!("{}", err);
break;
}
Ok(num_bytes) => num_bytes,
};
match bincode::deserialize(&buf[..num_bytes]) {
Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
Ok(_) => (),
Err(_) => break,
},
Err(err) => {
eprintln!("{}", err);
break;
}
};
}
// One consequence of every client publishing TCP packets to the same channel
// is that we don't know when any one disconnects.
// We signal it here with a `None`.
internal_tcp_tx.send((idx, None)).await.ok();
});
let random_bytes = players[idx].random_bytes.clone();
let update = game::WorldUpdate {
tick,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
tokio::spawn(async move {
// Send the init packet.
// For now, this will just include a random sequence of bytes.
// We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
id,
random_bytes,
update,
tick_rate: tick_rate as u8,
tick_zero,
}))
.unwrap();
if let Err(err) = writer.write_all(&bytes[..]).await {
eprintln!("{}", err);
return;
}
println!("wrote init message");
loop {
match rx.recv().await {
Some(msg) => {
if let Err(_) = writer
.write_all(bincode::serialize(&msg).unwrap().as_slice())
.await
{
break;
}
}
None => break,
};
}
});
}
fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) | 0.0
} else {
dampened_velocity_unclamped
};
let velocity_unit = player
.player
.velocity
.try_normalize(0.0)
.unwrap_or(Vector2::new(0.0, 0.0));
player.player.velocity = dampened_velocity * velocity_unit;
player.player.position += dt * player.player.velocity;
}
// Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets
.iter()
.filter(|(_, b)| b.lifetime > 1.0)
.map(|(idx, _)| idx)
.collect();
for idx in bullets_to_remove.iter() {
bullets.remove(*idx);
}
// Fire bullets.
for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
let rof = 30.0;
player.fire_counter += rof * dt;
if player.fire_counter >= 1.0 {
player.fire_counter %= 1.0;
let idx = match bullets.try_insert(Bullet {
bullet: game::Bullet {
id: 0,
player_id: player.player.id,
position: player.player.position,
angle: player.angle,
radius: 0.5,
},
velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
lifetime: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
eprintln!("too many bullets!");
break;
}
};
// Set the user ID to the arena index.
let raw_parts = idx.into_raw_parts();
bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16;
}
}
// Update bullets.
for (_, bullet) in bullets.iter_mut() {
bullet.bullet.position += dt * bullet.velocity;
bullet.lifetime += dt;
}
// Manage collisions.
// We have to collect the idxs to avoid borrowing `players`.
let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
let idx_pairs = idxs
.iter()
.map(|a| idxs.iter().map(move |b| (a, b)))
.flatten()
.filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
for (a, b) in idx_pairs {
let (a, b) = players.get2_mut(*a, *b);
let a = a.unwrap();
let b = b.unwrap();
let distance = match a.player.position - b.player.position {
v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
v => v,
};
let max_distance = a.player.radius + b.player.radius;
if distance.magnitude_squared() >= max_distance.powi(2) {
continue; // No collision.
}
let displacement_unit = distance.try_normalize(0.0).unwrap();
let displacement = displacement_unit * (max_distance - distance.magnitude());
a.player.position += 0.5 * displacement;
b.player.position += -0.5 * displacement;
let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
let elasticity = 2.0;
a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) {
Some(port) => port,
None => {
eprintln!("Usage: {} PORT", args().nth(0).unwrap());
return Ok(());
}
};
let mut players = Arena::with_capacity(16);
let mut bullets = Arena::with_capacity(1024);
let tick_rate = 60;
let mut ticker = interval(Duration::from_secs(1) / tick_rate);
let snapshot_rate = 10;
let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate);
let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?;
let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?;
// tcp_rx is our global receiver for TCP events.
// This means that each player holds a copy of tcp_tx which packets are passed to.
let (tcp_tx, mut tcp_rx) = channel(4);
let mut tick = Wrapping(0);
let tick_zero = SystemTime::now();
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
// TODO(jack) Redesign this select! call to execute as little code linearly as possible.
tokio::select! {
_ = ticker.tick() => {
// Update game state.
let dt = 1.0 / tick_rate as f64; // TODO(jack) Measure actual elapsed time.
step(&mut players, &mut bullets, dt);
tick = tick + Wrapping(1);
},
_ = snapshot_ticker.tick() => {
// Broadcast.
let update = game::WorldUpdate {
tick: tick.0,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
let bytes = bincode::serialize(&game::UdpClientMessage::WorldUpdate(update)).unwrap();
for (_, player) in players.iter().filter(|(_, p)| p.udp_addr.is_some()) {
udp_socket.send_to(&bytes, player.udp_addr.unwrap()).await?;
}
},
accept_result = tcp_listener.accept() => match accept_result {
Ok((stream, _)) => accept(&mut players, &bullets, stream, tcp_tx.clone(), tick_rate, tick_zero, tick.0),
Err(err) => {
eprintln!("{}", err);
break
},
},
// TODO(jack) TCP messages from the client should end up in a channel.
result = tcp_rx.recv() => match result {
Some((idx, None)) => {
println!("disconnection!");
let id = players[idx].player.id;
// Broadcast that a player left.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx!= idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
tokio::spawn(async move {
let msg = game::TcpClientMessage::PlayerLeft(id);
let join_handles = tcp_txs
.iter_mut()
.map(|tcp_tx| tcp_tx.send(msg.clone())); // TODO(jack) Can we do this without allocations?
join_all(join_handles).await;
});
players.remove(idx);
},
Some((idx, Some(msg))) => println!("{:?}: {:?}", idx, msg),
None => break,
},
result = udp_socket.recv_from(&mut buf) => match result {
Ok((0, _)) => break,
Ok((MAX_PACKET_SIZE_PLUS_ONE, _)) => break,
Ok((num_bytes, socket_addr)) => {
let bytes = &buf[..num_bytes];
let msg: game::UdpServerMessage = match bincode::deserialize(&bytes) {
Ok(msg) => msg,
Err(err) => {
eprintln!("{}", err);
continue
},
};
match msg {
game::UdpServerMessage::Init(game::ServerInit { random_bytes }) => {
println!("received init message: {:?}", random_bytes);
if let Some((_, player)) = players.iter_mut().find(|(_, player)| player.random_bytes == random_bytes) {
player.udp_addr = Some(socket_addr);
println!("{:?}", player.udp_addr);
}
},
game::UdpServerMessage::PlayerInput(inputs) => {
let (_, player) = match players.iter_mut().find(|(_, player)| player.udp_addr.is_some() && player.udp_addr.unwrap() == socket_addr) {
Some((idx, player)) => (idx, player),
None => continue,
};
// TODO(jack) Apply the inputs according to their tick.
// Right now, we're just taking the most recent one.
if inputs.len() == 0 {
continue
}
let input = inputs.iter().last().unwrap();
player.input = Vector2::new(
(input.right as i32 - input.left as i32) as f64,
(input.down as i32 - input.up as i32) as f64,
).try_normalize(0.0).unwrap_or(Vector2::new(0.0, 0.0));
player.angle = input.angle;
// TODO(jack) We probably just want to compose input in the player struct.
if input.mouse_left {
player.firing = true;
} else {
player.firing = false;
player.fire_counter = 0.0;
}
},
};
},
Err(err) => {
eprintln!("{}", err);
break
},
}
}
}
Ok(())
}
| {
// Apply player impulse.
for (_, player) in players.iter_mut() {
let acceleration = 64.0;
let max_velocity = 16.0;
let friction = 16.0;
// Acceleration ranges from `friction` to `friction + acceleration`,
// and is inversely proportional to the projection of the current velocity onto the input vector.
let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
let acceleration_index = if acceleration_index < 0.0 {
0.0
} else {
acceleration_index.sqrt()
};
let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
player.player.velocity += adjusted_acceleration * dt * player.input;
let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
let dampened_velocity = if dampened_velocity_unclamped < 0.0 { | identifier_body |
main.rs | use futures::future::join_all;
use generational_arena::{Arena, Index};
use nalgebra::Vector2;
use std::env::args;
use std::error::Error;
use std::net::SocketAddr;
use std::num::Wrapping;
use std::time::Duration;
use std::time::SystemTime;
use tokio::net::{TcpListener, TcpStream, UdpSocket};
use tokio::prelude::*;
use tokio::sync::mpsc::{channel, Sender};
use tokio::time::interval;
use game;
#[derive(Debug)]
struct Player {
player: game::Player,
tcp_tx: Sender<game::TcpClientMessage>,
random_bytes: [u8; game::NUM_RANDOM_BYTES],
udp_addr: Option<SocketAddr>,
input: Vector2<f64>,
angle: f64,
firing: bool,
fire_counter: f64,
}
#[derive(Debug)]
struct Bullet {
bullet: game::Bullet,
velocity: Vector2<f64>,
lifetime: f64,
}
fn | (
players: &mut Arena<Player>,
bullets: &Arena<Bullet>,
stream: TcpStream,
mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>,
tick_rate: u32,
tick_zero: SystemTime,
tick: game::Tick,
) {
println!("connection!");
let (tx, mut rx) = channel(4);
let idx = match players.try_insert(Player {
player: game::Player {
id: 0, // Set below.
radius: 1.0,
position: Vector2::new(0.0, 0.0),
velocity: Vector2::new(0.0, 0.0),
},
tcp_tx: tx,
udp_addr: None,
random_bytes: rand::random(),
input: Vector2::new(0.0, 0.0),
angle: 0.0,
firing: false,
fire_counter: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
println!("rejecting connection; too many players");
return;
}
};
// Set the user ID to some combinaiton of the arena index and generation.
let id = idx.into_raw_parts().0 as u8;
players[idx].player.id = id;
// TODO(jack) Broadcast PlayerLeft messages.
// Broadcast PlayerJoined messages.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx!= idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
let msg = game::TcpClientMessage::PlayerJoined(id);
tokio::spawn(async move {
let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
join_all(join_handles).await;
});
// Start tasks to read-from / write-to the TCP socket.
let (mut reader, mut writer) = stream.into_split();
tokio::spawn(async move {
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
let num_bytes = match reader.read(&mut buf).await {
Ok(0) => break,
Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
Err(err) => {
eprintln!("{}", err);
break;
}
Ok(num_bytes) => num_bytes,
};
match bincode::deserialize(&buf[..num_bytes]) {
Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
Ok(_) => (),
Err(_) => break,
},
Err(err) => {
eprintln!("{}", err);
break;
}
};
}
// One consequence of every client publishing TCP packets to the same channel
// is that we don't know when any one disconnects.
// We signal it here with a `None`.
internal_tcp_tx.send((idx, None)).await.ok();
});
let random_bytes = players[idx].random_bytes.clone();
let update = game::WorldUpdate {
tick,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
tokio::spawn(async move {
// Send the init packet.
// For now, this will just include a random sequence of bytes.
// We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
id,
random_bytes,
update,
tick_rate: tick_rate as u8,
tick_zero,
}))
.unwrap();
if let Err(err) = writer.write_all(&bytes[..]).await {
eprintln!("{}", err);
return;
}
println!("wrote init message");
loop {
match rx.recv().await {
Some(msg) => {
if let Err(_) = writer
.write_all(bincode::serialize(&msg).unwrap().as_slice())
.await
{
break;
}
}
None => break,
};
}
});
}
fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) {
// Apply player impulse.
for (_, player) in players.iter_mut() {
let acceleration = 64.0;
let max_velocity = 16.0;
let friction = 16.0;
// Acceleration ranges from `friction` to `friction + acceleration`,
// and is inversely proportional to the projection of the current velocity onto the input vector.
let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
let acceleration_index = if acceleration_index < 0.0 {
0.0
} else {
acceleration_index.sqrt()
};
let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
player.player.velocity += adjusted_acceleration * dt * player.input;
let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
let dampened_velocity = if dampened_velocity_unclamped < 0.0 {
0.0
} else {
dampened_velocity_unclamped
};
let velocity_unit = player
.player
.velocity
.try_normalize(0.0)
.unwrap_or(Vector2::new(0.0, 0.0));
player.player.velocity = dampened_velocity * velocity_unit;
player.player.position += dt * player.player.velocity;
}
// Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets
.iter()
.filter(|(_, b)| b.lifetime > 1.0)
.map(|(idx, _)| idx)
.collect();
for idx in bullets_to_remove.iter() {
bullets.remove(*idx);
}
// Fire bullets.
for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
let rof = 30.0;
player.fire_counter += rof * dt;
if player.fire_counter >= 1.0 {
player.fire_counter %= 1.0;
let idx = match bullets.try_insert(Bullet {
bullet: game::Bullet {
id: 0,
player_id: player.player.id,
position: player.player.position,
angle: player.angle,
radius: 0.5,
},
velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
lifetime: 0.0,
}) {
Ok(idx) => idx,
Err(_) => {
eprintln!("too many bullets!");
break;
}
};
// Set the user ID to the arena index.
let raw_parts = idx.into_raw_parts();
bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16;
}
}
// Update bullets.
for (_, bullet) in bullets.iter_mut() {
bullet.bullet.position += dt * bullet.velocity;
bullet.lifetime += dt;
}
// Manage collisions.
// We have to collect the idxs to avoid borrowing `players`.
let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
let idx_pairs = idxs
.iter()
.map(|a| idxs.iter().map(move |b| (a, b)))
.flatten()
.filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
for (a, b) in idx_pairs {
let (a, b) = players.get2_mut(*a, *b);
let a = a.unwrap();
let b = b.unwrap();
let distance = match a.player.position - b.player.position {
v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
v => v,
};
let max_distance = a.player.radius + b.player.radius;
if distance.magnitude_squared() >= max_distance.powi(2) {
continue; // No collision.
}
let displacement_unit = distance.try_normalize(0.0).unwrap();
let displacement = displacement_unit * (max_distance - distance.magnitude());
a.player.position += 0.5 * displacement;
b.player.position += -0.5 * displacement;
let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
let elasticity = 2.0;
a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) {
Some(port) => port,
None => {
eprintln!("Usage: {} PORT", args().nth(0).unwrap());
return Ok(());
}
};
let mut players = Arena::with_capacity(16);
let mut bullets = Arena::with_capacity(1024);
let tick_rate = 60;
let mut ticker = interval(Duration::from_secs(1) / tick_rate);
let snapshot_rate = 10;
let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate);
let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?;
let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?;
// tcp_rx is our global receiver for TCP events.
// This means that each player holds a copy of tcp_tx which packets are passed to.
let (tcp_tx, mut tcp_rx) = channel(4);
let mut tick = Wrapping(0);
let tick_zero = SystemTime::now();
loop {
const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
// TODO(jack) Redesign this select! call to execute as little code linearly as possible.
tokio::select! {
_ = ticker.tick() => {
// Update game state.
let dt = 1.0 / tick_rate as f64; // TODO(jack) Measure actual elapsed time.
step(&mut players, &mut bullets, dt);
tick = tick + Wrapping(1);
},
_ = snapshot_ticker.tick() => {
// Broadcast.
let update = game::WorldUpdate {
tick: tick.0,
players: players.iter().map(|(_, p)| p.player).collect(),
bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
};
let bytes = bincode::serialize(&game::UdpClientMessage::WorldUpdate(update)).unwrap();
for (_, player) in players.iter().filter(|(_, p)| p.udp_addr.is_some()) {
udp_socket.send_to(&bytes, player.udp_addr.unwrap()).await?;
}
},
accept_result = tcp_listener.accept() => match accept_result {
Ok((stream, _)) => accept(&mut players, &bullets, stream, tcp_tx.clone(), tick_rate, tick_zero, tick.0),
Err(err) => {
eprintln!("{}", err);
break
},
},
// TODO(jack) TCP messages from the client should end up in a channel.
result = tcp_rx.recv() => match result {
Some((idx, None)) => {
println!("disconnection!");
let id = players[idx].player.id;
// Broadcast that a player left.
let mut tcp_txs: Vec<_> = players
.iter()
.filter(|(other_idx, _)| *other_idx!= idx)
.map(|(_, p)| p.tcp_tx.clone())
.collect();
tokio::spawn(async move {
let msg = game::TcpClientMessage::PlayerLeft(id);
let join_handles = tcp_txs
.iter_mut()
.map(|tcp_tx| tcp_tx.send(msg.clone())); // TODO(jack) Can we do this without allocations?
join_all(join_handles).await;
});
players.remove(idx);
},
Some((idx, Some(msg))) => println!("{:?}: {:?}", idx, msg),
None => break,
},
result = udp_socket.recv_from(&mut buf) => match result {
Ok((0, _)) => break,
Ok((MAX_PACKET_SIZE_PLUS_ONE, _)) => break,
Ok((num_bytes, socket_addr)) => {
let bytes = &buf[..num_bytes];
let msg: game::UdpServerMessage = match bincode::deserialize(&bytes) {
Ok(msg) => msg,
Err(err) => {
eprintln!("{}", err);
continue
},
};
match msg {
game::UdpServerMessage::Init(game::ServerInit { random_bytes }) => {
println!("received init message: {:?}", random_bytes);
if let Some((_, player)) = players.iter_mut().find(|(_, player)| player.random_bytes == random_bytes) {
player.udp_addr = Some(socket_addr);
println!("{:?}", player.udp_addr);
}
},
game::UdpServerMessage::PlayerInput(inputs) => {
let (_, player) = match players.iter_mut().find(|(_, player)| player.udp_addr.is_some() && player.udp_addr.unwrap() == socket_addr) {
Some((idx, player)) => (idx, player),
None => continue,
};
// TODO(jack) Apply the inputs according to their tick.
// Right now, we're just taking the most recent one.
if inputs.len() == 0 {
continue
}
let input = inputs.iter().last().unwrap();
player.input = Vector2::new(
(input.right as i32 - input.left as i32) as f64,
(input.down as i32 - input.up as i32) as f64,
).try_normalize(0.0).unwrap_or(Vector2::new(0.0, 0.0));
player.angle = input.angle;
// TODO(jack) We probably just want to compose input in the player struct.
if input.mouse_left {
player.firing = true;
} else {
player.firing = false;
player.fire_counter = 0.0;
}
},
};
},
Err(err) => {
eprintln!("{}", err);
break
},
}
}
}
Ok(())
}
| accept | identifier_name |
main.rs | .help("include path")
.takes_value(true),
)
.arg(
Arg::new("define")
.short('D')
.long("define")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DEFINE")
.help("macro definition")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.required(true)
.value_name("HEADER")
.help("header file name")
.takes_value(true),
)
.arg(
Arg::new("directive")
.short('d')
.long("directive")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DIRECTIVE")
.help("directives to put within include_cpp!")
.takes_value(true),
)
)
.subcommand(Command::new("repro")
.about("reduce a repro case JSON file")
.arg(
Arg::new("repro")
.short('r')
.long("repro")
.required(true)
.value_name("REPRODUCTION CASE JSON")
.help("reproduction case JSON file name")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("HEADER")
.help("header file name; specify to resume a part-completed run")
.takes_value(true),
)
)
.arg(
Arg::new("problem")
.short('p')
.long("problem")
.required(true)
.value_name("PROBLEM")
.help("problem string we're looking for... may be in logs, or in generated C++, or generated.rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct | {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if!Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
}
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
}
fn announce_progress(msg: &str) {
println!("=== {msg} ===");
}
fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> {
announce_progress("Completed. Minimized test case:");
let contents = std::fs::read_to_string(concat_path)?;
println!("{contents}");
Ok(())
}
/// Arguments we pass to creduce if supported. This pass always seems to cause a crash
/// as far as I can tell, so always exclude it. It may be environment-dependent,
/// of course, but as I'm the primary user of this tool I am ruthlessly removing it.
const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"];
const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"];
fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool {
let cmd = std::process::Command::new(creduce_cmd)
.arg("--help")
.output();
let msg = match cmd {
Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. error = {error}"),
Ok(result) => result.stdout
};
let msg = std::str::from_utf8(&msg).unwrap();
msg.contains("--remove-pass")
}
fn run_creduce<'a>(
creduce_cmd: &str,
interestingness_test: &'a Path,
concat_path: &'a Path,
creduce_args: impl Iterator<Item = &'a str>,
) {
announce_progress("creduce");
let args = std::iter::once(interestingness_test.to_str().unwrap())
.chain(std::iter::once(concat_path.to_str().unwrap()))
.chain(creduce_args)
.chain(
if creduce_supports_remove_pass(creduce_cmd) {
REMOVE_PASS_LINE_MARKERS
} else {
SKIP_INITIAL_PASSES
}
.iter()
.copied(),
)
.collect::<Vec<_>>();
println!("Command: {} {}", creduce_cmd, args.join(" "));
std::process::Command::new(creduce_cmd)
.args(args)
.status()
.expect("failed to creduce");
}
fn run_sample_gen_cmd(
gen_cmd: &str,
rs_file: &Path,
tmp_dir: &Path,
extra_clang_args: &[&str],
) -> Result<(), std::io::Error> {
let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args);
let args = args.collect | ReproCase | identifier_name |
main.rs | .help("include path")
.takes_value(true),
)
.arg(
Arg::new("define")
.short('D')
.long("define")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DEFINE")
.help("macro definition")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.required(true)
.value_name("HEADER")
.help("header file name")
.takes_value(true),
)
.arg(
Arg::new("directive")
.short('d')
.long("directive")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DIRECTIVE")
.help("directives to put within include_cpp!")
.takes_value(true),
)
)
.subcommand(Command::new("repro")
.about("reduce a repro case JSON file")
.arg(
Arg::new("repro")
.short('r')
.long("repro")
.required(true)
.value_name("REPRODUCTION CASE JSON")
.help("reproduction case JSON file name")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("HEADER")
.help("header file name; specify to resume a part-completed run")
.takes_value(true),
)
)
.arg(
Arg::new("problem")
.short('p')
.long("problem")
.required(true)
.value_name("PROBLEM")
.help("problem string we're looking for... may be in logs, or in generated C++, or generated.rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct ReproCase {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if!Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
}
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> |
fn announce_progress(msg: &str) {
println!("=== {msg} ===");
}
fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> {
announce_progress("Completed. Minimized test case:");
let contents = std::fs::read_to_string(concat_path)?;
println!("{contents}");
Ok(())
}
/// Arguments we pass to creduce if supported. This pass always seems to cause a crash
/// as far as I can tell, so always exclude it. It may be environment-dependent,
/// of course, but as I'm the primary user of this tool I am ruthlessly removing it.
const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"];
const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"];
fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool {
let cmd = std::process::Command::new(creduce_cmd)
.arg("--help")
.output();
let msg = match cmd {
Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. error = {error}"),
Ok(result) => result.stdout
};
let msg = std::str::from_utf8(&msg).unwrap();
msg.contains("--remove-pass")
}
fn run_creduce<'a>(
creduce_cmd: &str,
interestingness_test: &'a Path,
concat_path: &'a Path,
creduce_args: impl Iterator<Item = &'a str>,
) {
announce_progress("creduce");
let args = std::iter::once(interestingness_test.to_str().unwrap())
.chain(std::iter::once(concat_path.to_str().unwrap()))
.chain(creduce_args)
.chain(
if creduce_supports_remove_pass(creduce_cmd) {
REMOVE_PASS_LINE_MARKERS
} else {
SKIP_INITIAL_PASSES
}
.iter()
.copied(),
)
.collect::<Vec<_>>();
println!("Command: {} {}", creduce_cmd, args.join(" "));
std::process::Command::new(creduce_cmd)
.args(args)
.status()
.expect("failed to creduce");
}
fn run_sample_gen_cmd(
gen_cmd: &str,
rs_file: &Path,
tmp_dir: &Path,
extra_clang_args: &[&str],
) -> Result<(), std::io::Error> {
let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args);
let args = args.collect | {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
} | identifier_body |
main.rs | .takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.required(true)
.value_name("HEADER")
.help("header file name")
.takes_value(true),
)
.arg(
Arg::new("directive")
.short('d')
.long("directive")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DIRECTIVE")
.help("directives to put within include_cpp!")
.takes_value(true),
)
)
.subcommand(Command::new("repro")
.about("reduce a repro case JSON file")
.arg(
Arg::new("repro")
.short('r')
.long("repro")
.required(true)
.value_name("REPRODUCTION CASE JSON")
.help("reproduction case JSON file name")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("HEADER")
.help("header file name; specify to resume a part-completed run")
.takes_value(true),
)
)
.arg(
Arg::new("problem")
.short('p')
.long("problem")
.required(true)
.value_name("PROBLEM")
.help("problem string we're looking for... may be in logs, or in generated C++, or generated.rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct ReproCase {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if!Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
}
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
}
fn announce_progress(msg: &str) {
println!("=== {msg} ===");
}
fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> {
announce_progress("Completed. Minimized test case:");
let contents = std::fs::read_to_string(concat_path)?;
println!("{contents}");
Ok(())
}
/// Arguments we pass to creduce if supported. This pass always seems to cause a crash
/// as far as I can tell, so always exclude it. It may be environment-dependent,
/// of course, but as I'm the primary user of this tool I am ruthlessly removing it.
const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"];
const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"];
fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool {
let cmd = std::process::Command::new(creduce_cmd)
.arg("--help")
.output();
let msg = match cmd {
Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. error = {error}"),
Ok(result) => result.stdout
};
let msg = std::str::from_utf8(&msg).unwrap();
msg.contains("--remove-pass")
}
fn run_creduce<'a>(
creduce_cmd: &str,
interestingness_test: &'a Path,
concat_path: &'a Path,
creduce_args: impl Iterator<Item = &'a str>,
) {
announce_progress("creduce");
let args = std::iter::once(interestingness_test.to_str().unwrap())
.chain(std::iter::once(concat_path.to_str().unwrap()))
.chain(creduce_args)
.chain(
if creduce_supports_remove_pass(creduce_cmd) {
REMOVE_PASS_LINE_MARKERS
} else {
SKIP_INITIAL_PASSES
}
.iter()
.copied(),
)
.collect::<Vec<_>>();
println!("Command: {} {}", creduce_cmd, args.join(" "));
std::process::Command::new(creduce_cmd)
.args(args)
.status()
.expect("failed to creduce");
}
fn run_sample_gen_cmd(
gen_cmd: &str,
rs_file: &Path,
tmp_dir: &Path,
extra_clang_args: &[&str],
) -> Result<(), std::io::Error> {
let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args);
let args = args.collect::<Vec<_>>();
let args_str = args.join(" ");
announce_progress(&format!("Running sample gen cmd: {gen_cmd} {args_str}"));
std::process::Command::new(gen_cmd).args(args).status()?;
Ok(())
}
fn run_demo_interestingness_test(demo_dir: &Path, test: &Path) -> Result<(), std::io::Error> {
announce_progress(&format!(
"Running demo interestingness test in {}",
demo_dir.to_string_lossy()
));
std::process::Command::new(test)
.current_dir(demo_dir)
.status()?;
Ok(())
}
fn format_gen_cmd<'a>(
rs_file: &Path,
dir: &str,
extra_clang_args: &'a [&str],
) -> impl Iterator<Item = String> + 'a {
let args = [
"-o".to_string(),
dir.to_string(),
"-I".to_string(),
dir.to_string(),
rs_file.to_str().unwrap().to_string(),
"--gen-rs-include".to_string(),
"--gen-cpp".to_string(),
"--suppress-system-headers".to_string(),
"--".to_string(),
]
.to_vec();
args.into_iter()
.chain(extra_clang_args.iter().map(|s| s.to_string()))
}
fn create_interestingness_test(
matches: &ArgMatches, | gen_cmd: &str, | random_line_split |
|
main.rs | .help("include path")
.takes_value(true),
)
.arg(
Arg::new("define")
.short('D')
.long("define")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DEFINE")
.help("macro definition")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.required(true)
.value_name("HEADER")
.help("header file name")
.takes_value(true),
)
.arg(
Arg::new("directive")
.short('d')
.long("directive")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("DIRECTIVE")
.help("directives to put within include_cpp!")
.takes_value(true),
)
)
.subcommand(Command::new("repro")
.about("reduce a repro case JSON file")
.arg(
Arg::new("repro")
.short('r')
.long("repro")
.required(true)
.value_name("REPRODUCTION CASE JSON")
.help("reproduction case JSON file name")
.takes_value(true),
)
.arg(
Arg::new("header")
.long("header")
.multiple_occurrences(true)
.number_of_values(1)
.value_name("HEADER")
.help("header file name; specify to resume a part-completed run")
.takes_value(true),
)
)
.arg(
Arg::new("problem")
.short('p')
.long("problem")
.required(true)
.value_name("PROBLEM")
.help("problem string we're looking for... may be in logs, or in generated C++, or generated.rs")
.takes_value(true),
)
.arg(
Arg::new("creduce")
.long("creduce")
.value_name("PATH")
.help("creduce binary location")
.default_value("creduce")
.takes_value(true),
)
.arg(
Arg::new("output")
.short('o')
.long("output")
.value_name("OUTPUT")
.help("where to write minimized output")
.takes_value(true),
)
.arg(
Arg::new("gen-cmd")
.short('g')
.long("gen-cmd")
.value_name("GEN-CMD")
.help("where to find autocxx-gen")
.default_value(&default_gen_cmd)
.takes_value(true),
)
.arg(
Arg::new("rustc")
.long("rustc")
.value_name("RUSTC")
.help("where to find rustc")
.default_value("rustc")
.takes_value(true),
)
.arg(
Arg::new("rlibs")
.long("rlibs")
.value_name("LIBDIR")
.help("where to find rlibs/rmetas for cxx and autocxx")
.default_values(default_rlibs)
.multiple_values(true)
.takes_value(true),
)
.arg(
Arg::new("keep")
.short('k')
.long("keep-dir")
.help("keep the temporary directory for debugging purposes"),
)
.arg(
Arg::new("clang-args")
.short('c')
.long("clang-arg")
.multiple_occurrences(true)
.value_name("CLANG_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("creduce-args")
.long("creduce-arg")
.multiple_occurrences(true)
.value_name("CREDUCE_ARG")
.help("Extra arguments to pass to Clang"),
)
.arg(
Arg::new("no-precompile")
.long("no-precompile")
.help("Do not precompile the C++ header before passing to autocxxgen"),
)
.arg(
Arg::new("no-postcompile")
.long("no-postcompile")
.help("Do not post-compile the C++ generated by autocxxgen"),
)
.arg(
Arg::new("no-rustc")
.long("no-rustc")
.help("Do not compile the rust generated by autocxxgen"),
)
.arg(
Arg::new("suppress-cxx-inclusions")
.long("suppress-cxx-inclusions")
.takes_value(true)
.possible_value("yes")
.possible_value("no")
.possible_value("auto")
.default_value("auto")
.help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
)
.arg_required_else_help(true)
.get_matches();
run(matches).unwrap();
}
fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
let keep_tmp = matches.is_present("keep");
let tmp_dir = TempDir::new()?;
let r = do_run(matches, &tmp_dir);
if keep_tmp {
println!(
"Keeping temp dir created at: {}",
tmp_dir.into_path().to_str().unwrap()
);
}
r
}
#[derive(serde_derive::Deserialize)]
struct ReproCase {
config: String,
header: String,
}
fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> {
let rs_path = tmp_dir.path().join("input.rs");
let concat_path = tmp_dir.path().join("concat.h");
match matches.subcommand_matches("repro") {
None => {
let submatches = matches.subcommand_matches("file").unwrap();
let incs: Vec<_> = submatches
.values_of("inc")
.unwrap_or_default()
.map(PathBuf::from)
.collect();
let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect();
let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect();
assert!(!headers.is_empty());
let listing_path = tmp_dir.path().join("listing.h");
create_concatenated_header(&headers, &listing_path)?;
announce_progress(&format!(
"Preprocessing {listing_path:?} to {concat_path:?}"
));
preprocess(&listing_path, &concat_path, &incs, &defs)?;
let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string())
.chain(
submatches
.values_of("directive")
.unwrap_or_default()
.map(|s| format!("{s}\n")),
)
.collect();
create_rs_file(&rs_path, &directives)?;
}
Some(submatches) => {
let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from(
submatches.value_of("repro").unwrap(),
))?)
.unwrap();
// Replace the headers in the config
let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap();
config.replace_included_headers("concat.h");
create_file(
&rs_path,
&format!("autocxx::include_cpp!({});", config.to_token_stream()),
)?;
if let Some(header) = submatches.value_of("header") {
std::fs::copy(PathBuf::from(header), &concat_path)?;
} else {
create_file(&concat_path, &case.header)?
}
}
}
let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() {
"yes" => true,
"no" => false,
"auto" => detect_cxx_h(&concat_path)?,
_ => panic!("unexpected value"),
};
let cxx_suppressions = if suppress_cxx_classes {
get_cxx_suppressions()
} else {
Vec::new()
};
let extra_clang_args: Vec<_> = matches
.values_of("clang-args")
.unwrap_or_default()
.map(Cow::Borrowed)
.chain(cxx_suppressions.into_iter().map(Cow::Owned))
.collect();
let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec();
let gen_cmd = matches.value_of("gen-cmd").unwrap();
if!Path::new(gen_cmd).exists() {
panic!(
"autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen"
);
}
run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?;
// Create and run an interestingness test which does not filter its output through grep.
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test");
std::fs::create_dir(&demo_interestingness_test_dir).unwrap();
let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
None,
&rs_path,
&extra_clang_args,
)?;
let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h");
std::fs::copy(&concat_path, demo_dir_concat_path).unwrap();
run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap();
// Now the main interestingness test
let interestingness_test = tmp_dir.path().join("test.sh");
create_interestingness_test(
&matches,
gen_cmd,
&interestingness_test,
Some(matches.value_of("problem").unwrap()),
&rs_path,
&extra_clang_args,
)?;
run_creduce(
matches.value_of("creduce").unwrap(),
&interestingness_test,
&concat_path,
matches.values_of("creduce-args").unwrap_or_default(),
);
announce_progress("creduce completed");
let output_path = matches.value_of("output");
match output_path {
None => print_minimized_case(&concat_path)?,
Some(output_path) => |
};
Ok(())
}
/// Try to detect whether the preprocessed source code already contains
/// a preprocessed version of cxx.h. This is hard because all the comments
/// and preprocessor symbols may have been removed, and in fact if we're
/// part way through reduction, parts of the code may have been removed too.
fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> {
let haystack = std::fs::read_to_string(concat_path)?;
Ok(["class Box", "class Vec", "class Slice"]
.iter()
.all(|needle| haystack.contains(needle)))
}
fn announce_progress(msg: &str) {
println!("=== {msg} ===");
}
fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> {
announce_progress("Completed. Minimized test case:");
let contents = std::fs::read_to_string(concat_path)?;
println!("{contents}");
Ok(())
}
/// Arguments we pass to creduce if supported. This pass always seems to cause a crash
/// as far as I can tell, so always exclude it. It may be environment-dependent,
/// of course, but as I'm the primary user of this tool I am ruthlessly removing it.
const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"];
const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"];
fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool {
let cmd = std::process::Command::new(creduce_cmd)
.arg("--help")
.output();
let msg = match cmd {
Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. error = {error}"),
Ok(result) => result.stdout
};
let msg = std::str::from_utf8(&msg).unwrap();
msg.contains("--remove-pass")
}
fn run_creduce<'a>(
creduce_cmd: &str,
interestingness_test: &'a Path,
concat_path: &'a Path,
creduce_args: impl Iterator<Item = &'a str>,
) {
announce_progress("creduce");
let args = std::iter::once(interestingness_test.to_str().unwrap())
.chain(std::iter::once(concat_path.to_str().unwrap()))
.chain(creduce_args)
.chain(
if creduce_supports_remove_pass(creduce_cmd) {
REMOVE_PASS_LINE_MARKERS
} else {
SKIP_INITIAL_PASSES
}
.iter()
.copied(),
)
.collect::<Vec<_>>();
println!("Command: {} {}", creduce_cmd, args.join(" "));
std::process::Command::new(creduce_cmd)
.args(args)
.status()
.expect("failed to creduce");
}
fn run_sample_gen_cmd(
gen_cmd: &str,
rs_file: &Path,
tmp_dir: &Path,
extra_clang_args: &[&str],
) -> Result<(), std::io::Error> {
let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args);
let args = args.collect | {
std::fs::copy(&concat_path, PathBuf::from(output_path))?;
} | conditional_block |
block_stream.rs | use anyhow::Error;
use async_stream::stream;
use futures03::Stream;
use std::fmt;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc::{self, Receiver, Sender};
use super::{Block, BlockPtr, Blockchain};
use crate::anyhow::Result;
use crate::components::store::{BlockNumber, DeploymentLocator};
use crate::data::subgraph::UnifiedMappingApiVersion;
use crate::firehose::{self, FirehoseEndpoint};
use crate::substreams_rpc::response::Message;
use crate::{prelude::*, prometheus::labels};
pub struct BufferedBlockStream<C: Blockchain> { | stream: Box<dyn BlockStream<C>>,
size_hint: usize,
) -> Box<dyn BlockStream<C>> {
let (sender, receiver) = mpsc::channel::<Result<BlockStreamEvent<C>, Error>>(size_hint);
crate::spawn(async move { BufferedBlockStream::stream_blocks(stream, sender).await });
Box::new(BufferedBlockStream::new(receiver))
}
pub fn new(mut receiver: Receiver<Result<BlockStreamEvent<C>, Error>>) -> Self {
let inner = stream! {
loop {
let event = match receiver.recv().await {
Some(evt) => evt,
None => return,
};
yield event
}
};
Self {
inner: Box::pin(inner),
}
}
pub async fn stream_blocks(
mut stream: Box<dyn BlockStream<C>>,
sender: Sender<Result<BlockStreamEvent<C>, Error>>,
) -> Result<(), Error> {
while let Some(event) = stream.next().await {
match sender.send(event).await {
Ok(_) => continue,
Err(err) => {
return Err(anyhow!(
"buffered blockstream channel is closed, stopping. Err: {}",
err
))
}
}
}
Ok(())
}
}
impl<C: Blockchain> BlockStream<C> for BufferedBlockStream<C> {}
impl<C: Blockchain> Stream for BufferedBlockStream<C> {
type Item = Result<BlockStreamEvent<C>, Error>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
self.inner.poll_next_unpin(cx)
}
}
pub trait BlockStream<C: Blockchain>:
Stream<Item = Result<BlockStreamEvent<C>, Error>> + Unpin + Send
{
}
/// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added
#[async_trait]
pub trait BlockRefetcher<C: Blockchain>: Send + Sync {
fn required(&self, chain: &C) -> bool;
async fn get_block(
&self,
chain: &C,
logger: &Logger,
cursor: FirehoseCursor,
) -> Result<C::Block, Error>;
}
/// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait
#[async_trait]
pub trait BlockStreamBuilder<C: Blockchain>: Send + Sync {
async fn build_firehose(
&self,
chain: &C,
deployment: DeploymentLocator,
block_cursor: FirehoseCursor,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
async fn build_polling(
&self,
chain: &C,
deployment: DeploymentLocator,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
}
#[derive(Debug, Clone)]
pub struct FirehoseCursor(Option<String>);
impl FirehoseCursor {
#[allow(non_upper_case_globals)]
pub const None: Self = FirehoseCursor(None);
pub fn is_none(&self) -> bool {
self.0.is_none()
}
}
impl fmt::Display for FirehoseCursor {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.0.as_deref().unwrap_or(""))
}
}
impl From<String> for FirehoseCursor {
fn from(cursor: String) -> Self {
// Treat a cursor of "" as None, not absolutely necessary for correctness since the firehose
// treats both as the same, but makes it a little clearer.
if cursor.is_empty() {
FirehoseCursor::None
} else {
FirehoseCursor(Some(cursor))
}
}
}
impl From<Option<String>> for FirehoseCursor {
fn from(cursor: Option<String>) -> Self {
match cursor {
None => FirehoseCursor::None,
Some(s) => FirehoseCursor::from(s),
}
}
}
impl AsRef<Option<String>> for FirehoseCursor {
fn as_ref(&self) -> &Option<String> {
&self.0
}
}
#[derive(Debug)]
pub struct BlockWithTriggers<C: Blockchain> {
pub block: C::Block,
pub trigger_data: Vec<C::TriggerData>,
}
impl<C: Blockchain> Clone for BlockWithTriggers<C>
where
C::TriggerData: Clone,
{
fn clone(&self) -> Self {
Self {
block: self.block.clone(),
trigger_data: self.trigger_data.clone(),
}
}
}
impl<C: Blockchain> BlockWithTriggers<C> {
/// Creates a BlockWithTriggers structure, which holds
/// the trigger data ordered and without any duplicates.
pub fn new(block: C::Block, mut trigger_data: Vec<C::TriggerData>, logger: &Logger) -> Self {
// This is where triggers get sorted.
trigger_data.sort();
let old_len = trigger_data.len();
// This is removing the duplicate triggers in the case of multiple
// data sources fetching the same event/call/etc.
trigger_data.dedup();
let new_len = trigger_data.len();
if new_len!= old_len {
debug!(
logger,
"Trigger data had duplicate triggers";
"block_number" => block.number(),
"block_hash" => block.hash().hash_hex(),
"old_length" => old_len,
"new_length" => new_len,
);
}
Self {
block,
trigger_data,
}
}
pub fn trigger_count(&self) -> usize {
self.trigger_data.len()
}
pub fn ptr(&self) -> BlockPtr {
self.block.ptr()
}
pub fn parent_ptr(&self) -> Option<BlockPtr> {
self.block.parent_ptr()
}
}
#[async_trait]
pub trait TriggersAdapter<C: Blockchain>: Send + Sync {
// Return the block that is `offset` blocks before the block pointed to
// by `ptr` from the local cache. An offset of 0 means the block itself,
// an offset of 1 means the block's parent etc. If the block is not in
// the local cache, return `None`
async fn ancestor_block(
&self,
ptr: BlockPtr,
offset: BlockNumber,
) -> Result<Option<C::Block>, Error>;
// Returns a sequence of blocks in increasing order of block number.
// Each block will include all of its triggers that match the given `filter`.
// The sequence may omit blocks that contain no triggers,
// but all returned blocks must part of a same chain starting at `chain_base`.
// At least one block will be returned, even if it contains no triggers.
// `step_size` is the suggested number blocks to be scanned.
async fn scan_triggers(
&self,
from: BlockNumber,
to: BlockNumber,
filter: &C::TriggerFilter,
) -> Result<Vec<BlockWithTriggers<C>>, Error>;
// Used for reprocessing blocks when creating a data source.
async fn triggers_in_block(
&self,
logger: &Logger,
block: C::Block,
filter: &C::TriggerFilter,
) -> Result<BlockWithTriggers<C>, Error>;
/// Return `true` if the block with the given hash and number is on the
/// main chain, i.e., the chain going back from the current chain head.
async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error>;
/// Get pointer to parent of `block`. This is called when reverting `block`.
async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error>;
}
#[async_trait]
pub trait FirehoseMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: &firehose::Response,
adapter: &Arc<dyn TriggersAdapter<C>>,
filter: &C::TriggerFilter,
) -> Result<BlockStreamEvent<C>, FirehoseError>;
/// Returns the [BlockPtr] value for this given block number. This is the block pointer
/// of the longuest according to Firehose view of the blockchain state.
///
/// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make
/// it chain agnostic and callable from chain agnostic [FirehoseBlockStream].
async fn block_ptr_for_number(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
number: BlockNumber,
) -> Result<BlockPtr, Error>;
/// Returns the closest final block ptr to the block ptr received.
/// On probablitics chain like Ethereum, final is determined by
/// the confirmations threshold configured for the Firehose stack (currently
/// hard-coded to 200).
///
/// On some other chain like NEAR, the actual final block number is determined
/// from the block itself since it contains information about which block number
/// is final against the current block.
///
/// To take an example, assuming we are on Ethereum, the final block pointer
/// for block #10212 would be the determined final block #10012 (10212 - 200 = 10012).
async fn final_block_ptr_for(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
block: &C::Block,
) -> Result<BlockPtr, Error>;
}
#[async_trait]
pub trait SubstreamsMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: Option<Message>,
// adapter: &Arc<dyn TriggersAdapter<C>>,
// filter: &C::TriggerFilter,
) -> Result<Option<BlockStreamEvent<C>>, SubstreamsError>;
}
#[derive(Error, Debug)]
pub enum FirehoseError {
/// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block)
#[error("received gRPC block payload cannot be decoded: {0}")]
DecodingError(#[from] prost::DecodeError),
/// Some unknown error occurred
#[error("unknown error")]
UnknownError(#[from] anyhow::Error),
}
#[derive(Error, Debug)]
pub enum SubstreamsError {
#[error("response is missing the clock information")]
MissingClockError,
#[error("invalid undo message")]
InvalidUndoError,
/// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block)
#[error("received gRPC block payload cannot be decoded: {0}")]
DecodingError(#[from] prost::DecodeError),
/// Some unknown error occurred
#[error("unknown error")]
UnknownError(#[from] anyhow::Error),
#[error("multiple module output error")]
MultipleModuleOutputError,
#[error("module output was not available (none) or wrong data provided")]
ModuleOutputNotPresentOrUnexpected,
#[error("unexpected store delta output")]
UnexpectedStoreDeltaOutput,
}
#[derive(Debug)]
pub enum BlockStreamEvent<C: Blockchain> {
// The payload is the block the subgraph should revert to, so it becomes the new subgraph head.
Revert(BlockPtr, FirehoseCursor),
ProcessBlock(BlockWithTriggers<C>, FirehoseCursor),
}
impl<C: Blockchain> Clone for BlockStreamEvent<C>
where
C::TriggerData: Clone,
{
fn clone(&self) -> Self {
match self {
Self::Revert(arg0, arg1) => Self::Revert(arg0.clone(), arg1.clone()),
Self::ProcessBlock(arg0, arg1) => Self::ProcessBlock(arg0.clone(), arg1.clone()),
}
}
}
#[derive(Clone)]
pub struct BlockStreamMetrics {
pub deployment_head: Box<Gauge>,
pub deployment_failed: Box<Gauge>,
pub reverted_blocks: Gauge,
pub stopwatch: StopwatchMetrics,
}
impl BlockStreamMetrics {
pub fn new(
registry: Arc<MetricsRegistry>,
deployment_id: &DeploymentHash,
network: String,
shard: String,
stopwatch: StopwatchMetrics,
) -> Self {
let reverted_blocks = registry
.new_deployment_gauge(
"deployment_reverted_blocks",
"Track the last reverted block for a subgraph deployment",
deployment_id.as_str(),
)
.expect("Failed to create `deployment_reverted_blocks` gauge");
let labels = labels! {
String::from("deployment") => deployment_id.to_string(),
String::from("network") => network,
String::from("shard") => shard
};
let deployment_head = registry
.new_gauge(
"deployment_head",
"Track the head block number for a deployment",
labels.clone(),
)
.expect("failed to create `deployment_head` gauge");
let deployment_failed = registry
.new_gauge(
"deployment_failed",
"Boolean gauge to indicate whether the deployment has failed (1 == failed)",
labels,
)
.expect("failed to create `deployment_failed` gauge");
Self {
deployment_head,
deployment_failed,
reverted_blocks,
stopwatch,
}
}
}
/// Notifications about the chain head advancing. The block ingestor sends
/// an update on this stream whenever the head of the underlying chain
/// changes. The updates have no payload, receivers should call
/// `Store::chain_head_ptr` to check what the latest block is.
pub type ChainHeadUpdateStream = Box<dyn Stream<Item = ()> + Send + Unpin>;
pub trait ChainHeadUpdateListener: Send + Sync +'static {
/// Subscribe to chain head updates for the given network.
fn subscribe(&self, network: String, logger: Logger) -> ChainHeadUpdateStream;
}
#[cfg(test)]
mod test {
use std::{collections::HashSet, task::Poll};
use anyhow::Error;
use futures03::{Stream, StreamExt, TryStreamExt};
use crate::{
blockchain::mock::{MockBlock, MockBlockchain},
ext::futures::{CancelableError, SharedCancelGuard, StreamExtension},
};
use super::{
BlockStream, BlockStreamEvent, BlockWithTriggers, BufferedBlockStream, FirehoseCursor,
};
#[derive(Debug)]
struct TestStream {
number: u64,
}
impl BlockStream<MockBlockchain> for TestStream {}
impl Stream for TestStream {
type Item = Result<BlockStreamEvent<MockBlockchain>, Error>;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
self.number += 1;
Poll::Ready(Some(Ok(BlockStreamEvent::ProcessBlock(
BlockWithTriggers::<MockBlockchain> {
block: MockBlock {
number: self.number - 1,
},
trigger_data: vec![],
},
FirehoseCursor::None,
))))
}
}
#[tokio::test]
async fn consume_stream() {
let initial_block = 100;
let buffer_size = 5;
let stream = Box::new(TestStream {
number: initial_block,
});
let guard = SharedCancelGuard::new();
let mut stream = BufferedBlockStream::spawn_from_stream(stream, buffer_size)
.map_err(CancelableError::Error)
.cancelable(&guard, || Err(CancelableError::Cancel));
let mut blocks = HashSet::<MockBlock>::new();
let mut count = 0;
loop {
match stream.next().await {
None if blocks.is_empty() => panic!("None before blocks"),
Some(Err(CancelableError::Cancel)) => {
assert!(guard.is_canceled(), "Guard shouldn't be called yet");
break;
}
Some(Ok(BlockStreamEvent::ProcessBlock(block_triggers, _))) => {
let block = block_triggers.block;
blocks.insert(block.clone());
count += 1;
if block.number > initial_block + buffer_size as u64 {
guard.cancel();
}
}
_ => panic!("Should not happen"),
};
}
assert!(
blocks.len() > buffer_size,
"should consume at least a full buffer, consumed {}",
count
);
assert_eq!(count, blocks.len(), "should not have duplicated blocks");
}
} | inner: Pin<Box<dyn Stream<Item = Result<BlockStreamEvent<C>, Error>> + Send>>,
}
impl<C: Blockchain + 'static> BufferedBlockStream<C> {
pub fn spawn_from_stream( | random_line_split |
block_stream.rs | use anyhow::Error;
use async_stream::stream;
use futures03::Stream;
use std::fmt;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc::{self, Receiver, Sender};
use super::{Block, BlockPtr, Blockchain};
use crate::anyhow::Result;
use crate::components::store::{BlockNumber, DeploymentLocator};
use crate::data::subgraph::UnifiedMappingApiVersion;
use crate::firehose::{self, FirehoseEndpoint};
use crate::substreams_rpc::response::Message;
use crate::{prelude::*, prometheus::labels};
pub struct BufferedBlockStream<C: Blockchain> {
inner: Pin<Box<dyn Stream<Item = Result<BlockStreamEvent<C>, Error>> + Send>>,
}
impl<C: Blockchain +'static> BufferedBlockStream<C> {
pub fn spawn_from_stream(
stream: Box<dyn BlockStream<C>>,
size_hint: usize,
) -> Box<dyn BlockStream<C>> {
let (sender, receiver) = mpsc::channel::<Result<BlockStreamEvent<C>, Error>>(size_hint);
crate::spawn(async move { BufferedBlockStream::stream_blocks(stream, sender).await });
Box::new(BufferedBlockStream::new(receiver))
}
pub fn new(mut receiver: Receiver<Result<BlockStreamEvent<C>, Error>>) -> Self {
let inner = stream! {
loop {
let event = match receiver.recv().await {
Some(evt) => evt,
None => return,
};
yield event
}
};
Self {
inner: Box::pin(inner),
}
}
pub async fn stream_blocks(
mut stream: Box<dyn BlockStream<C>>,
sender: Sender<Result<BlockStreamEvent<C>, Error>>,
) -> Result<(), Error> {
while let Some(event) = stream.next().await {
match sender.send(event).await {
Ok(_) => continue,
Err(err) => {
return Err(anyhow!(
"buffered blockstream channel is closed, stopping. Err: {}",
err
))
}
}
}
Ok(())
}
}
impl<C: Blockchain> BlockStream<C> for BufferedBlockStream<C> {}
impl<C: Blockchain> Stream for BufferedBlockStream<C> {
type Item = Result<BlockStreamEvent<C>, Error>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
self.inner.poll_next_unpin(cx)
}
}
pub trait BlockStream<C: Blockchain>:
Stream<Item = Result<BlockStreamEvent<C>, Error>> + Unpin + Send
{
}
/// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added
#[async_trait]
pub trait BlockRefetcher<C: Blockchain>: Send + Sync {
fn required(&self, chain: &C) -> bool;
async fn get_block(
&self,
chain: &C,
logger: &Logger,
cursor: FirehoseCursor,
) -> Result<C::Block, Error>;
}
/// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait
#[async_trait]
pub trait BlockStreamBuilder<C: Blockchain>: Send + Sync {
async fn build_firehose(
&self,
chain: &C,
deployment: DeploymentLocator,
block_cursor: FirehoseCursor,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
async fn build_polling(
&self,
chain: &C,
deployment: DeploymentLocator,
start_blocks: Vec<BlockNumber>,
subgraph_current_block: Option<BlockPtr>,
filter: Arc<C::TriggerFilter>,
unified_api_version: UnifiedMappingApiVersion,
) -> Result<Box<dyn BlockStream<C>>>;
}
#[derive(Debug, Clone)]
pub struct FirehoseCursor(Option<String>);
impl FirehoseCursor {
#[allow(non_upper_case_globals)]
pub const None: Self = FirehoseCursor(None);
pub fn is_none(&self) -> bool {
self.0.is_none()
}
}
impl fmt::Display for FirehoseCursor {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.write_str(self.0.as_deref().unwrap_or(""))
}
}
impl From<String> for FirehoseCursor {
fn from(cursor: String) -> Self {
// Treat a cursor of "" as None, not absolutely necessary for correctness since the firehose
// treats both as the same, but makes it a little clearer.
if cursor.is_empty() {
FirehoseCursor::None
} else {
FirehoseCursor(Some(cursor))
}
}
}
impl From<Option<String>> for FirehoseCursor {
fn from(cursor: Option<String>) -> Self {
match cursor {
None => FirehoseCursor::None,
Some(s) => FirehoseCursor::from(s),
}
}
}
impl AsRef<Option<String>> for FirehoseCursor {
fn as_ref(&self) -> &Option<String> {
&self.0
}
}
#[derive(Debug)]
pub struct BlockWithTriggers<C: Blockchain> {
pub block: C::Block,
pub trigger_data: Vec<C::TriggerData>,
}
impl<C: Blockchain> Clone for BlockWithTriggers<C>
where
C::TriggerData: Clone,
{
fn clone(&self) -> Self {
Self {
block: self.block.clone(),
trigger_data: self.trigger_data.clone(),
}
}
}
impl<C: Blockchain> BlockWithTriggers<C> {
/// Creates a BlockWithTriggers structure, which holds
/// the trigger data ordered and without any duplicates.
pub fn | (block: C::Block, mut trigger_data: Vec<C::TriggerData>, logger: &Logger) -> Self {
// This is where triggers get sorted.
trigger_data.sort();
let old_len = trigger_data.len();
// This is removing the duplicate triggers in the case of multiple
// data sources fetching the same event/call/etc.
trigger_data.dedup();
let new_len = trigger_data.len();
if new_len!= old_len {
debug!(
logger,
"Trigger data had duplicate triggers";
"block_number" => block.number(),
"block_hash" => block.hash().hash_hex(),
"old_length" => old_len,
"new_length" => new_len,
);
}
Self {
block,
trigger_data,
}
}
pub fn trigger_count(&self) -> usize {
self.trigger_data.len()
}
pub fn ptr(&self) -> BlockPtr {
self.block.ptr()
}
pub fn parent_ptr(&self) -> Option<BlockPtr> {
self.block.parent_ptr()
}
}
#[async_trait]
pub trait TriggersAdapter<C: Blockchain>: Send + Sync {
// Return the block that is `offset` blocks before the block pointed to
// by `ptr` from the local cache. An offset of 0 means the block itself,
// an offset of 1 means the block's parent etc. If the block is not in
// the local cache, return `None`
async fn ancestor_block(
&self,
ptr: BlockPtr,
offset: BlockNumber,
) -> Result<Option<C::Block>, Error>;
// Returns a sequence of blocks in increasing order of block number.
// Each block will include all of its triggers that match the given `filter`.
// The sequence may omit blocks that contain no triggers,
// but all returned blocks must part of a same chain starting at `chain_base`.
// At least one block will be returned, even if it contains no triggers.
// `step_size` is the suggested number blocks to be scanned.
async fn scan_triggers(
&self,
from: BlockNumber,
to: BlockNumber,
filter: &C::TriggerFilter,
) -> Result<Vec<BlockWithTriggers<C>>, Error>;
// Used for reprocessing blocks when creating a data source.
async fn triggers_in_block(
&self,
logger: &Logger,
block: C::Block,
filter: &C::TriggerFilter,
) -> Result<BlockWithTriggers<C>, Error>;
/// Return `true` if the block with the given hash and number is on the
/// main chain, i.e., the chain going back from the current chain head.
async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error>;
/// Get pointer to parent of `block`. This is called when reverting `block`.
async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error>;
}
#[async_trait]
pub trait FirehoseMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: &firehose::Response,
adapter: &Arc<dyn TriggersAdapter<C>>,
filter: &C::TriggerFilter,
) -> Result<BlockStreamEvent<C>, FirehoseError>;
/// Returns the [BlockPtr] value for this given block number. This is the block pointer
/// of the longuest according to Firehose view of the blockchain state.
///
/// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make
/// it chain agnostic and callable from chain agnostic [FirehoseBlockStream].
async fn block_ptr_for_number(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
number: BlockNumber,
) -> Result<BlockPtr, Error>;
/// Returns the closest final block ptr to the block ptr received.
/// On probablitics chain like Ethereum, final is determined by
/// the confirmations threshold configured for the Firehose stack (currently
/// hard-coded to 200).
///
/// On some other chain like NEAR, the actual final block number is determined
/// from the block itself since it contains information about which block number
/// is final against the current block.
///
/// To take an example, assuming we are on Ethereum, the final block pointer
/// for block #10212 would be the determined final block #10012 (10212 - 200 = 10012).
async fn final_block_ptr_for(
&self,
logger: &Logger,
endpoint: &Arc<FirehoseEndpoint>,
block: &C::Block,
) -> Result<BlockPtr, Error>;
}
#[async_trait]
pub trait SubstreamsMapper<C: Blockchain>: Send + Sync {
async fn to_block_stream_event(
&self,
logger: &Logger,
response: Option<Message>,
// adapter: &Arc<dyn TriggersAdapter<C>>,
// filter: &C::TriggerFilter,
) -> Result<Option<BlockStreamEvent<C>>, SubstreamsError>;
}
#[derive(Error, Debug)]
pub enum FirehoseError {
/// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block)
#[error("received gRPC block payload cannot be decoded: {0}")]
DecodingError(#[from] prost::DecodeError),
/// Some unknown error occurred
#[error("unknown error")]
UnknownError(#[from] anyhow::Error),
}
#[derive(Error, Debug)]
pub enum SubstreamsError {
#[error("response is missing the clock information")]
MissingClockError,
#[error("invalid undo message")]
InvalidUndoError,
/// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block)
#[error("received gRPC block payload cannot be decoded: {0}")]
DecodingError(#[from] prost::DecodeError),
/// Some unknown error occurred
#[error("unknown error")]
UnknownError(#[from] anyhow::Error),
#[error("multiple module output error")]
MultipleModuleOutputError,
#[error("module output was not available (none) or wrong data provided")]
ModuleOutputNotPresentOrUnexpected,
#[error("unexpected store delta output")]
UnexpectedStoreDeltaOutput,
}
#[derive(Debug)]
pub enum BlockStreamEvent<C: Blockchain> {
// The payload is the block the subgraph should revert to, so it becomes the new subgraph head.
Revert(BlockPtr, FirehoseCursor),
ProcessBlock(BlockWithTriggers<C>, FirehoseCursor),
}
impl<C: Blockchain> Clone for BlockStreamEvent<C>
where
C::TriggerData: Clone,
{
fn clone(&self) -> Self {
match self {
Self::Revert(arg0, arg1) => Self::Revert(arg0.clone(), arg1.clone()),
Self::ProcessBlock(arg0, arg1) => Self::ProcessBlock(arg0.clone(), arg1.clone()),
}
}
}
#[derive(Clone)]
pub struct BlockStreamMetrics {
pub deployment_head: Box<Gauge>,
pub deployment_failed: Box<Gauge>,
pub reverted_blocks: Gauge,
pub stopwatch: StopwatchMetrics,
}
impl BlockStreamMetrics {
pub fn new(
registry: Arc<MetricsRegistry>,
deployment_id: &DeploymentHash,
network: String,
shard: String,
stopwatch: StopwatchMetrics,
) -> Self {
let reverted_blocks = registry
.new_deployment_gauge(
"deployment_reverted_blocks",
"Track the last reverted block for a subgraph deployment",
deployment_id.as_str(),
)
.expect("Failed to create `deployment_reverted_blocks` gauge");
let labels = labels! {
String::from("deployment") => deployment_id.to_string(),
String::from("network") => network,
String::from("shard") => shard
};
let deployment_head = registry
.new_gauge(
"deployment_head",
"Track the head block number for a deployment",
labels.clone(),
)
.expect("failed to create `deployment_head` gauge");
let deployment_failed = registry
.new_gauge(
"deployment_failed",
"Boolean gauge to indicate whether the deployment has failed (1 == failed)",
labels,
)
.expect("failed to create `deployment_failed` gauge");
Self {
deployment_head,
deployment_failed,
reverted_blocks,
stopwatch,
}
}
}
/// Notifications about the chain head advancing. The block ingestor sends
/// an update on this stream whenever the head of the underlying chain
/// changes. The updates have no payload, receivers should call
/// `Store::chain_head_ptr` to check what the latest block is.
pub type ChainHeadUpdateStream = Box<dyn Stream<Item = ()> + Send + Unpin>;
pub trait ChainHeadUpdateListener: Send + Sync +'static {
/// Subscribe to chain head updates for the given network.
fn subscribe(&self, network: String, logger: Logger) -> ChainHeadUpdateStream;
}
#[cfg(test)]
mod test {
use std::{collections::HashSet, task::Poll};
use anyhow::Error;
use futures03::{Stream, StreamExt, TryStreamExt};
use crate::{
blockchain::mock::{MockBlock, MockBlockchain},
ext::futures::{CancelableError, SharedCancelGuard, StreamExtension},
};
use super::{
BlockStream, BlockStreamEvent, BlockWithTriggers, BufferedBlockStream, FirehoseCursor,
};
#[derive(Debug)]
struct TestStream {
number: u64,
}
impl BlockStream<MockBlockchain> for TestStream {}
impl Stream for TestStream {
type Item = Result<BlockStreamEvent<MockBlockchain>, Error>;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
self.number += 1;
Poll::Ready(Some(Ok(BlockStreamEvent::ProcessBlock(
BlockWithTriggers::<MockBlockchain> {
block: MockBlock {
number: self.number - 1,
},
trigger_data: vec![],
},
FirehoseCursor::None,
))))
}
}
#[tokio::test]
async fn consume_stream() {
let initial_block = 100;
let buffer_size = 5;
let stream = Box::new(TestStream {
number: initial_block,
});
let guard = SharedCancelGuard::new();
let mut stream = BufferedBlockStream::spawn_from_stream(stream, buffer_size)
.map_err(CancelableError::Error)
.cancelable(&guard, || Err(CancelableError::Cancel));
let mut blocks = HashSet::<MockBlock>::new();
let mut count = 0;
loop {
match stream.next().await {
None if blocks.is_empty() => panic!("None before blocks"),
Some(Err(CancelableError::Cancel)) => {
assert!(guard.is_canceled(), "Guard shouldn't be called yet");
break;
}
Some(Ok(BlockStreamEvent::ProcessBlock(block_triggers, _))) => {
let block = block_triggers.block;
blocks.insert(block.clone());
count += 1;
if block.number > initial_block + buffer_size as u64 {
guard.cancel();
}
}
_ => panic!("Should not happen"),
};
}
assert!(
blocks.len() > buffer_size,
"should consume at least a full buffer, consumed {}",
count
);
assert_eq!(count, blocks.len(), "should not have duplicated blocks");
}
}
| new | identifier_name |
lib.rs | // Copyright 2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![recursion_limit = "128"]
extern crate proc_macro;
extern crate proc_macro2;
#[macro_use]
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro2::{Ident, Span, TokenStream};
use quote::ToTokens;
use syn::punctuated::Punctuated;
use syn::token::Comma;
fn create_function_params(num_args: usize) -> TokenStream {
let mut tokens = TokenStream::new();
for i in 0..num_args {
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
tokens.extend(quote!(
#arg_name,
));
}
tokens
}
fn extract_arg_data(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut get_args_stream = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
let arg_error = format!("unsupported function argument type for {}", arg_name);
let get_arg = quote!(
let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from(
pg_extend::pg_datum::PgDatum::from_raw(
*args.next().expect("wrong number of args passed into get_args for args?"),
args_null.next().expect("wrong number of args passed into get_args for args_null?")
),
)
.expect(#arg_error);
);
get_args_stream.extend(get_arg);
}
get_args_stream
}
fn sql_param_list(num_args: usize) -> String {
let mut tokens = String::new();
if num_args == 0 {
return tokens;
}
let arg_name = |num: usize| format!("{{sql_{}}}", num);
for i in 0..(num_args - 1) {
let arg_name = arg_name(i);
tokens.push_str(&format!("{},", arg_name));
}
let arg_name = arg_name(num_args - 1);
tokens.push_str(&arg_name);
tokens
}
fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut tokens = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site());
let sql_param = quote!(
#sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(),
);
tokens.extend(sql_param);
}
tokens
}
fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream {
let ty = match outputs {
syn::ReturnType::Default => quote!(()),
syn::ReturnType::Type(_, ty) => quote!(#ty),
};
quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt())
}
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream {
let typ = if let syn::Item::Struct(typ) = item {
typ
} else {
panic!("Annotation only supported on structs")
};
let mut decl = item.clone().into_token_stream();
let struct_name = &typ.ident;
let func_name = syn::Ident::new(
&format!("fdw_{}", struct_name),
Span::call_site(),
);
let info_fn = get_info_fn(&func_name);
let fdw_fn = quote!(
#[no_mangle]
pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum()
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site());
let sql_stmt = format!(
"
CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR;
",
struct_name, func_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
library_path = library_path
)
}
);
decl.extend(info_fn);
decl.extend(create_sql_def);
decl.extend(fdw_fn);
decl
}
fn get_info_fn(func_name: &syn::Ident) -> TokenStream {
let func_info_name = syn::Ident::new(
&format!("pg_finfo_{}", func_name),
Span::call_site(),
);
// create the postgres info
quote!(
#[no_mangle]
pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record {
const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 };
&my_finfo
}
)
}
fn impl_info_for_fn(item: &syn::Item) -> TokenStream {
let func = if let syn::Item::Fn(func) = item {
func
} else {
panic!("annotation only supported on functions");
};
let func_name = &func.ident;
let func_decl = &func.decl;
if func_decl.variadic.is_some() {
panic!("variadic functions (...) not supported")
}
//let generics = &func_decl.generics;
let inputs = &func_decl.inputs;
let output = &func_decl.output;
//let func_block = &func.block;
// declare the function
let mut function = item.clone().into_token_stream();
let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site());
let func_info = get_info_fn(&func_wrapper_name);
// join the function information in
function.extend(func_info);
let get_args_from_datums = extract_arg_data(inputs);
let func_params = create_function_params(inputs.len());
// wrap the original function in a pg_wrapper function
let func_wrapper = quote!(
#[no_mangle]
pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
use std::panic;
let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe {
func_call_info
.as_mut()
.expect("func_call_info was unexpectedly NULL")
};
// guard the Postgres process against the panic, and give us an oportunity to cleanup
let panic_result = panic::catch_unwind(|| {
// extract the argument list
let (mut args, mut args_null) = pg_extend::get_args(func_info);
// arbitrary Datum conversions occur here, and could panic
// so this is inside the catch unwind
#get_args_from_datums
// this is the meat of the function call into the extension code
let result = #func_name(#func_params);
// arbitrary Rust code could panic, so this is guarded
pg_extend::pg_datum::PgDatum::from(result)
});
// see if we caught a panic
match panic_result {
Ok(result) => {
// in addition to the null case, we should handle result types probably
let isnull: pg_extend::pg_bool::Bool = result.is_null().into();
func_info.isnull = isnull.into();
// return the datum
result.into_datum()
}
Err(err) => {
// ensure the return value is null
func_info.isnull = pg_extend::pg_bool::Bool::from(true).into();
// TODO: anything else to cean up before resuming the panic?
panic::resume_unwind(err)
}
}
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site());
let sql_params = sql_param_list(inputs.len());
let sql_param_types = sql_param_types(inputs);
let sql_return = sql_return_type(output);
// ret and library_path are replacements at runtime
let sql_stmt = format!(
// FIXME: Add/remove STRICT keywords based on Option<> arguments.
"CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;",
func_name, sql_params, func_wrapper_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
#sql_param_types
ret = #sql_return,
library_path = library_path
)
}
);
function.extend(func_wrapper);
function.extend(create_sql_def);
function
}
/// An attribute macro for wrapping Rust functions with boiler plate for defining and
/// calling conventions between Postgres and Rust.
///
/// This mimics the C macro for defining functions
///
/// ```c
/// #define PG_FUNCTION_INFO_V1(funcname) \
/// extern Datum funcname(PG_FUNCTION_ARGS); \
/// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \
/// const Pg_finfo_record * \
/// CppConcat(pg_finfo_,funcname) (void) \
/// { \
/// static const Pg_finfo_record my_finfo = { 1 }; \
/// return &my_finfo; \
/// } \
/// ```
///
/// # Returns
///
/// The result of this macro will be to produce a new function wrapping the one annotated but prepended with
/// `pg_` to distinquish them and also declares a function for Postgres to get the Function information;
///
/// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced,
/// the wrapper function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum
/// # {
/// # unimplemented!()
/// # }
/// ```
///
/// and the info function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record
/// # {
/// # unimplemented!()
/// # }
/// ```
///
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn pg_extern(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fn(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
}
/// An attribute macro for wrapping Rust structs with boiler plate for defining and exposing a foreign data wrapper
/// This is mostly a slimmed down version of pg_extern, with none of the data argument handling.
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn | (
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fdw(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
}
| pg_foreignwrapper | identifier_name |
lib.rs | // Copyright 2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![recursion_limit = "128"]
extern crate proc_macro;
extern crate proc_macro2;
#[macro_use]
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro2::{Ident, Span, TokenStream};
use quote::ToTokens;
use syn::punctuated::Punctuated;
use syn::token::Comma;
fn create_function_params(num_args: usize) -> TokenStream {
let mut tokens = TokenStream::new();
for i in 0..num_args {
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
tokens.extend(quote!(
#arg_name,
));
}
tokens
}
fn extract_arg_data(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut get_args_stream = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
let arg_error = format!("unsupported function argument type for {}", arg_name);
let get_arg = quote!(
let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from(
pg_extend::pg_datum::PgDatum::from_raw(
*args.next().expect("wrong number of args passed into get_args for args?"),
args_null.next().expect("wrong number of args passed into get_args for args_null?")
),
)
.expect(#arg_error);
);
get_args_stream.extend(get_arg);
}
get_args_stream
}
fn sql_param_list(num_args: usize) -> String {
let mut tokens = String::new();
if num_args == 0 {
return tokens;
}
let arg_name = |num: usize| format!("{{sql_{}}}", num);
for i in 0..(num_args - 1) {
let arg_name = arg_name(i);
tokens.push_str(&format!("{},", arg_name));
}
let arg_name = arg_name(num_args - 1);
tokens.push_str(&arg_name);
tokens
}
fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut tokens = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site());
let sql_param = quote!(
#sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(),
);
tokens.extend(sql_param);
}
tokens
}
fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream |
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream {
let typ = if let syn::Item::Struct(typ) = item {
typ
} else {
panic!("Annotation only supported on structs")
};
let mut decl = item.clone().into_token_stream();
let struct_name = &typ.ident;
let func_name = syn::Ident::new(
&format!("fdw_{}", struct_name),
Span::call_site(),
);
let info_fn = get_info_fn(&func_name);
let fdw_fn = quote!(
#[no_mangle]
pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum()
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site());
let sql_stmt = format!(
"
CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR;
",
struct_name, func_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
library_path = library_path
)
}
);
decl.extend(info_fn);
decl.extend(create_sql_def);
decl.extend(fdw_fn);
decl
}
fn get_info_fn(func_name: &syn::Ident) -> TokenStream {
let func_info_name = syn::Ident::new(
&format!("pg_finfo_{}", func_name),
Span::call_site(),
);
// create the postgres info
quote!(
#[no_mangle]
pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record {
const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 };
&my_finfo
}
)
}
fn impl_info_for_fn(item: &syn::Item) -> TokenStream {
let func = if let syn::Item::Fn(func) = item {
func
} else {
panic!("annotation only supported on functions");
};
let func_name = &func.ident;
let func_decl = &func.decl;
if func_decl.variadic.is_some() {
panic!("variadic functions (...) not supported")
}
//let generics = &func_decl.generics;
let inputs = &func_decl.inputs;
let output = &func_decl.output;
//let func_block = &func.block;
// declare the function
let mut function = item.clone().into_token_stream();
let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site());
let func_info = get_info_fn(&func_wrapper_name);
// join the function information in
function.extend(func_info);
let get_args_from_datums = extract_arg_data(inputs);
let func_params = create_function_params(inputs.len());
// wrap the original function in a pg_wrapper function
let func_wrapper = quote!(
#[no_mangle]
pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
use std::panic;
let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe {
func_call_info
.as_mut()
.expect("func_call_info was unexpectedly NULL")
};
// guard the Postgres process against the panic, and give us an oportunity to cleanup
let panic_result = panic::catch_unwind(|| {
// extract the argument list
let (mut args, mut args_null) = pg_extend::get_args(func_info);
// arbitrary Datum conversions occur here, and could panic
// so this is inside the catch unwind
#get_args_from_datums
// this is the meat of the function call into the extension code
let result = #func_name(#func_params);
// arbitrary Rust code could panic, so this is guarded
pg_extend::pg_datum::PgDatum::from(result)
});
// see if we caught a panic
match panic_result {
Ok(result) => {
// in addition to the null case, we should handle result types probably
let isnull: pg_extend::pg_bool::Bool = result.is_null().into();
func_info.isnull = isnull.into();
// return the datum
result.into_datum()
}
Err(err) => {
// ensure the return value is null
func_info.isnull = pg_extend::pg_bool::Bool::from(true).into();
// TODO: anything else to cean up before resuming the panic?
panic::resume_unwind(err)
}
}
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site());
let sql_params = sql_param_list(inputs.len());
let sql_param_types = sql_param_types(inputs);
let sql_return = sql_return_type(output);
// ret and library_path are replacements at runtime
let sql_stmt = format!(
// FIXME: Add/remove STRICT keywords based on Option<> arguments.
"CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;",
func_name, sql_params, func_wrapper_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
#sql_param_types
ret = #sql_return,
library_path = library_path
)
}
);
function.extend(func_wrapper);
function.extend(create_sql_def);
function
}
/// An attribute macro for wrapping Rust functions with boiler plate for defining and
/// calling conventions between Postgres and Rust.
///
/// This mimics the C macro for defining functions
///
/// ```c
/// #define PG_FUNCTION_INFO_V1(funcname) \
/// extern Datum funcname(PG_FUNCTION_ARGS); \
/// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \
/// const Pg_finfo_record * \
/// CppConcat(pg_finfo_,funcname) (void) \
/// { \
/// static const Pg_finfo_record my_finfo = { 1 }; \
/// return &my_finfo; \
/// } \
/// ```
///
/// # Returns
///
/// The result of this macro will be to produce a new function wrapping the one annotated but prepended with
/// `pg_` to distinquish them and also declares a function for Postgres to get the Function information;
///
/// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced,
/// the wrapper function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum
/// # {
/// # unimplemented!()
/// # }
/// ```
///
/// and the info function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record
/// # {
/// # unimplemented!()
/// # }
/// ```
///
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn pg_extern(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fn(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
}
/// An attribute macro for wrapping Rust structs with boiler plate for defining and exposing a foreign data wrapper
/// This is mostly a slimmed down version of pg_extern, with none of the data argument handling.
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn pg_foreignwrapper(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fdw(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
}
| {
let ty = match outputs {
syn::ReturnType::Default => quote!(()),
syn::ReturnType::Type(_, ty) => quote!(#ty),
};
quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt())
} | identifier_body |
lib.rs | // Copyright 2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![recursion_limit = "128"]
extern crate proc_macro;
extern crate proc_macro2;
#[macro_use]
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro2::{Ident, Span, TokenStream};
use quote::ToTokens;
use syn::punctuated::Punctuated;
use syn::token::Comma;
fn create_function_params(num_args: usize) -> TokenStream {
let mut tokens = TokenStream::new();
for i in 0..num_args {
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
tokens.extend(quote!(
#arg_name,
));
}
tokens
}
fn extract_arg_data(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut get_args_stream = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site());
let arg_error = format!("unsupported function argument type for {}", arg_name);
let get_arg = quote!(
let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from(
pg_extend::pg_datum::PgDatum::from_raw(
*args.next().expect("wrong number of args passed into get_args for args?"),
args_null.next().expect("wrong number of args passed into get_args for args_null?")
),
)
.expect(#arg_error);
);
get_args_stream.extend(get_arg);
}
get_args_stream
}
fn sql_param_list(num_args: usize) -> String {
let mut tokens = String::new();
if num_args == 0 {
return tokens;
}
let arg_name = |num: usize| format!("{{sql_{}}}", num);
for i in 0..(num_args - 1) {
let arg_name = arg_name(i);
tokens.push_str(&format!("{},", arg_name));
}
let arg_name = arg_name(num_args - 1);
tokens.push_str(&arg_name);
tokens
}
fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream {
let mut tokens = TokenStream::new();
for (i, arg) in inputs.iter().enumerate() {
let arg_type: &syn::Type = match *arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
panic!("self functions not supported")
}
syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"),
syn::FnArg::Captured(ref captured) => &captured.ty,
syn::FnArg::Ignored(ref ty) => ty,
};
let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site());
let sql_param = quote!(
#sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(),
);
tokens.extend(sql_param);
}
tokens
}
fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream {
let ty = match outputs {
syn::ReturnType::Default => quote!(()),
syn::ReturnType::Type(_, ty) => quote!(#ty),
};
quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt())
}
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream {
let typ = if let syn::Item::Struct(typ) = item {
typ
} else {
panic!("Annotation only supported on structs")
};
let mut decl = item.clone().into_token_stream();
let struct_name = &typ.ident;
let func_name = syn::Ident::new(
&format!("fdw_{}", struct_name),
Span::call_site(),
);
let info_fn = get_info_fn(&func_name);
let fdw_fn = quote!(
#[no_mangle]
pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum()
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site());
let sql_stmt = format!(
"
CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR;
",
struct_name, func_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
library_path = library_path
)
}
);
decl.extend(info_fn);
decl.extend(create_sql_def);
decl.extend(fdw_fn);
decl
}
fn get_info_fn(func_name: &syn::Ident) -> TokenStream {
let func_info_name = syn::Ident::new(
&format!("pg_finfo_{}", func_name),
Span::call_site(),
);
// create the postgres info
quote!(
#[no_mangle]
pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record {
const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 };
&my_finfo
}
)
}
fn impl_info_for_fn(item: &syn::Item) -> TokenStream {
let func = if let syn::Item::Fn(func) = item {
func
} else {
panic!("annotation only supported on functions");
};
let func_name = &func.ident;
let func_decl = &func.decl;
if func_decl.variadic.is_some() {
panic!("variadic functions (...) not supported")
}
//let generics = &func_decl.generics;
let inputs = &func_decl.inputs;
let output = &func_decl.output;
//let func_block = &func.block;
// declare the function
let mut function = item.clone().into_token_stream();
let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site());
let func_info = get_info_fn(&func_wrapper_name);
// join the function information in
function.extend(func_info);
let get_args_from_datums = extract_arg_data(inputs);
let func_params = create_function_params(inputs.len());
// wrap the original function in a pg_wrapper function
let func_wrapper = quote!(
#[no_mangle]
pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum {
use std::panic;
let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe {
func_call_info
.as_mut()
.expect("func_call_info was unexpectedly NULL")
};
|
// arbitrary Datum conversions occur here, and could panic
// so this is inside the catch unwind
#get_args_from_datums
// this is the meat of the function call into the extension code
let result = #func_name(#func_params);
// arbitrary Rust code could panic, so this is guarded
pg_extend::pg_datum::PgDatum::from(result)
});
// see if we caught a panic
match panic_result {
Ok(result) => {
// in addition to the null case, we should handle result types probably
let isnull: pg_extend::pg_bool::Bool = result.is_null().into();
func_info.isnull = isnull.into();
// return the datum
result.into_datum()
}
Err(err) => {
// ensure the return value is null
func_info.isnull = pg_extend::pg_bool::Bool::from(true).into();
// TODO: anything else to cean up before resuming the panic?
panic::resume_unwind(err)
}
}
}
);
let create_sql_name =
syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site());
let sql_params = sql_param_list(inputs.len());
let sql_param_types = sql_param_types(inputs);
let sql_return = sql_return_type(output);
// ret and library_path are replacements at runtime
let sql_stmt = format!(
// FIXME: Add/remove STRICT keywords based on Option<> arguments.
"CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;",
func_name, sql_params, func_wrapper_name,
);
// declare a function that can be used to output a create statement for the externed function
// all create statements will be put into a common module for access
let create_sql_def = quote!(
#[allow(unused)]
pub fn #create_sql_name(library_path: &str) -> String {
use pg_extend::pg_type::PgTypeInfo;
format!(
#sql_stmt,
#sql_param_types
ret = #sql_return,
library_path = library_path
)
}
);
function.extend(func_wrapper);
function.extend(create_sql_def);
function
}
/// An attribute macro for wrapping Rust functions with boiler plate for defining and
/// calling conventions between Postgres and Rust.
///
/// This mimics the C macro for defining functions
///
/// ```c
/// #define PG_FUNCTION_INFO_V1(funcname) \
/// extern Datum funcname(PG_FUNCTION_ARGS); \
/// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \
/// const Pg_finfo_record * \
/// CppConcat(pg_finfo_,funcname) (void) \
/// { \
/// static const Pg_finfo_record my_finfo = { 1 }; \
/// return &my_finfo; \
/// } \
/// ```
///
/// # Returns
///
/// The result of this macro will be to produce a new function wrapping the one annotated but prepended with
/// `pg_` to distinquish them and also declares a function for Postgres to get the Function information;
///
/// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced,
/// the wrapper function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum
/// # {
/// # unimplemented!()
/// # }
/// ```
///
/// and the info function with a signature of:
///
/// ```rust,no_run
/// extern crate pg_extend;
/// use pg_extend::pg_sys;
///
/// #[no_mangle]
/// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record
/// # {
/// # unimplemented!()
/// # }
/// ```
///
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn pg_extern(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fn(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
}
/// An attribute macro for wrapping Rust structs with boiler plate for defining and exposing a foreign data wrapper
/// This is mostly a slimmed down version of pg_extern, with none of the data argument handling.
#[proc_macro_attribute]
#[allow(clippy::needless_pass_by_value)]
pub fn pg_foreignwrapper(
_attr: proc_macro::TokenStream,
item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
// get a usable token stream
let ast: syn::Item = parse_macro_input!(item as syn::Item);
// Build the impl
let expanded: TokenStream = impl_info_for_fdw(&ast);
// Return the generated impl
proc_macro::TokenStream::from(expanded)
} | // guard the Postgres process against the panic, and give us an oportunity to cleanup
let panic_result = panic::catch_unwind(|| {
// extract the argument list
let (mut args, mut args_null) = pg_extend::get_args(func_info); | random_line_split |
sync.rs | // Copyright 2017 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Architecture for synchronizing a CRDT with the ledger. Separated into a
//! module so that it is easier to add other sync stores later.
use std::io::Write;
use std::sync::mpsc::{Receiver, RecvError, Sender};
use log;
use apps_ledger_services_public::*;
use fidl::{self, Future, Promise};
use fuchsia::read_entire_vmo;
use magenta::{Channel, ChannelOpts, HandleBase};
use serde_json;
use super::ledger::{self, ledger_crash_callback};
use tabs::{BufferContainerRef, BufferIdentifier};
use xi_rope::engine::Engine;
// TODO switch these to bincode
fn state_to_buf(state: &Engine) -> Vec<u8> {
serde_json::to_vec(state).unwrap()
}
fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> {
serde_json::from_slice(buf)
}
/// Stores state needed by the container to perform synchronization.
pub struct SyncStore {
page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
transaction_pending: bool,
buffer: BufferIdentifier,
}
impl SyncStore {
/// - `page` is a reference to the Ledger page to store data under.
/// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under.
/// This example only supports storing things under a single key per page.
/// - `updates` is a channel to a `SyncUpdater` that will handle events.
///
/// Returns a sync store and schedules the loading of initial
/// state and subscribes to state updates for this document.
pub fn new(
mut page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
) -> SyncStore {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let watcher_client = PageWatcher_Client::from_handle(s1.into_handle());
let watcher_client_ptr =
::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
let _ = fidl::Server::new(watcher, s2).spawn();
let (mut snap, snap_request) = PageSnapshot_new_pair();
page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
.with(ledger_crash_callback);
let initial_state_chan = updates.clone();
let initial_buffer = buffer.clone();
snap.get(key.clone()).with(move |raw_res| {
match raw_res.map(|res| ledger::value_result(res)) {
Ok(Ok(Some(buf))) => {
initial_state_chan
.send(SyncMsg::NewState {
buffer: initial_buffer,
new_buf: buf,
done: None,
})
.unwrap();
}
Ok(Ok(None)) => (), // No initial state saved yet
Err(err) => error!("FIDL failed on initial response: {:?}", err),
Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
}
});
SyncStore { page, key, updates, buffer, transaction_pending: false }
}
/// Called whenever this app changed its own state and would like to
/// persist the changes to the ledger. Changes can't be committed
/// immediately since we have to wait for PageWatcher changes that may not
/// have arrived yet.
pub fn state_changed(&mut self) {
if!self.transaction_pending {
self.transaction_pending = true;
let ready_future = self.page.start_transaction();
let done_chan = self.updates.clone();
let buffer = self.buffer.clone();
ready_future.with(move |res| match res {
Ok(ledger::OK) => {
done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
}
Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
});
}
}
/// Should be called in SyncContainer::transaction_ready to persist the current state.
pub fn commit_transaction(&mut self, state: &Engine) {
assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
self.page.commit().with(ledger_crash_callback);
self.transaction_pending = false;
}
}
/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
NewState {
buffer: BufferIdentifier,
new_buf: Vec<u8>,
done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
},
TransactionReady {
buffer: BufferIdentifier,
},
/// Shut down the updater thread
Stop,
}
/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
container_ref: BufferContainerRef<W>,
chan: Receiver<SyncMsg>,
}
impl<W: Write + Send +'static> SyncUpdater<W> {
pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> |
/// Run this in a thread, it will return when it encounters an error
/// reading the channel or when the `Stop` message is recieved.
pub fn work(&self) -> Result<(), RecvError> {
loop {
let msg = self.chan.recv()?;
match msg {
SyncMsg::Stop => return Ok(()),
SyncMsg::TransactionReady { buffer } => {
let mut container = self.container_ref.lock();
// if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
editor.transaction_ready();
}
}
SyncMsg::NewState { new_buf, done, buffer } => {
let mut container = self.container_ref.lock();
match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
(Some(mut editor), Ok(new_state)) => {
editor.merge_new_state(new_state);
if let Some(promise) = done {
promise.set_ok(None);
}
}
(None, _) => (), // buffer was closed
(_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
}
}
}
}
}
}
struct PageWatcherServer {
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
}
impl PageWatcher for PageWatcherServer {
fn on_change(
&mut self,
page_change: PageChange,
result_state: ResultState,
) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
let (future, done) = Future::make_promise();
let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
self.updates
.send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
.unwrap();
} else {
error!("Xi state corrupted, should have one key but has multiple.");
// I don't think this should be a FIDL-level error, so set okay
done.set_ok(None);
}
future
}
}
impl PageWatcher_Stub for PageWatcherServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);
// ============= Conflict resolution
pub fn start_conflict_resolver_factory(ledger: &mut Ledger_Proxy, key: Vec<u8>) {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
let resolver_client_ptr = ::fidl::InterfacePtr {
inner: resolver_client,
version: ConflictResolverFactory_Metadata::VERSION,
};
let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();
ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}
struct ConflictResolverFactoryServer {
key: Vec<u8>,
}
impl ConflictResolverFactory for ConflictResolverFactoryServer {
fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
Future::done(Ok(MergePolicy_Custom))
}
/// Our resolvers are the same for every page
fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
let _ = fidl::Server::new(
ConflictResolverServer { key: self.key.clone() },
resolver.into_channel(),
)
.spawn();
}
}
impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub);
fn state_from_snapshot<F>(
snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>,
key: Vec<u8>,
done: F,
) where
F: Send + FnOnce(Result<Option<Engine>, ()>) +'static,
{
assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version);
let mut snapshot_proxy = PageSnapshot_new_Proxy(snapshot.inner);
// TODO get a reference when too big
snapshot_proxy.get(key).with(move |raw_res| {
let state = match raw_res.map(|res| ledger::value_result(res)) {
// the.ok() has the behavior of acting like invalid state is empty
// and thus deleting invalid state and overwriting it with good state
Ok(Ok(Some(buf))) => Ok(buf_to_state(&buf).ok()),
Ok(Ok(None)) => {
info!("No state in conflicting page");
Ok(None)
}
Err(err) => {
warn!("FIDL failed on initial response: {:?}", err);
Err(())
}
Ok(Err(err)) => {
warn!("Ledger failed to retrieve key: {:?}", err);
Err(())
}
};
done(state);
});
}
struct ConflictResolverServer {
key: Vec<u8>,
}
impl ConflictResolver for ConflictResolverServer {
fn resolve(
&mut self,
left: ::fidl::InterfacePtr<PageSnapshot_Client>,
right: ::fidl::InterfacePtr<PageSnapshot_Client>,
_common_version: Option<::fidl::InterfacePtr<PageSnapshot_Client>>,
result_provider: ::fidl::InterfacePtr<MergeResultProvider_Client>,
) {
// TODO in the futures-rs future, do this in parallel with Future combinators
let key2 = self.key.clone();
state_from_snapshot(left, self.key.clone(), move |e1_opt| {
let key3 = key2.clone();
state_from_snapshot(right, key2, move |e2_opt| {
let result_opt = match (e1_opt, e2_opt) {
(Ok(Some(mut e1)), Ok(Some(e2))) => {
e1.merge(&e2);
Some(e1)
}
// one engine didn't exist yet, I'm not sure if Ledger actually generates a conflict in this case
(Ok(Some(e)), Ok(None)) | (Ok(None), Ok(Some(e))) => Some(e),
// failed to get one of the engines, we can't do the merge properly
(Err(()), _) | (_, Err(())) => None,
// if state is invalid or missing on both sides, can't merge
(Ok(None), Ok(None)) => None,
};
if let Some(out_state) = result_opt {
let buf = state_to_buf(&out_state);
// TODO use a reference here when buf is too big
let new_value = Some(Box::new(BytesOrReference::Bytes(buf)));
let merged = MergedValue {
key: key3,
source: ValueSource_New,
new_value,
priority: Priority_Eager,
};
assert_eq!(MergeResultProvider_Metadata::VERSION, result_provider.version);
let mut result_provider_proxy =
MergeResultProvider_new_Proxy(result_provider.inner);
result_provider_proxy.merge(vec![merged]);
result_provider_proxy.done().with(ledger_crash_callback);
}
});
});
}
}
impl ConflictResolver_Stub for ConflictResolverServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverServer: ConflictResolver_Stub);
| {
SyncUpdater { container_ref, chan }
} | identifier_body |
sync.rs | // Copyright 2017 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Architecture for synchronizing a CRDT with the ledger. Separated into a
//! module so that it is easier to add other sync stores later.
use std::io::Write;
use std::sync::mpsc::{Receiver, RecvError, Sender};
use log;
use apps_ledger_services_public::*;
use fidl::{self, Future, Promise};
use fuchsia::read_entire_vmo;
use magenta::{Channel, ChannelOpts, HandleBase};
use serde_json;
use super::ledger::{self, ledger_crash_callback};
use tabs::{BufferContainerRef, BufferIdentifier};
use xi_rope::engine::Engine;
// TODO switch these to bincode
fn state_to_buf(state: &Engine) -> Vec<u8> {
serde_json::to_vec(state).unwrap()
}
fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> {
serde_json::from_slice(buf)
}
/// Stores state needed by the container to perform synchronization.
pub struct SyncStore {
page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
transaction_pending: bool,
buffer: BufferIdentifier,
}
impl SyncStore {
/// - `page` is a reference to the Ledger page to store data under.
/// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under.
/// This example only supports storing things under a single key per page.
/// - `updates` is a channel to a `SyncUpdater` that will handle events.
///
/// Returns a sync store and schedules the loading of initial
/// state and subscribes to state updates for this document.
pub fn new(
mut page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
) -> SyncStore {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let watcher_client = PageWatcher_Client::from_handle(s1.into_handle());
let watcher_client_ptr =
::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
let _ = fidl::Server::new(watcher, s2).spawn();
let (mut snap, snap_request) = PageSnapshot_new_pair();
page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
.with(ledger_crash_callback);
let initial_state_chan = updates.clone();
let initial_buffer = buffer.clone();
snap.get(key.clone()).with(move |raw_res| {
match raw_res.map(|res| ledger::value_result(res)) {
Ok(Ok(Some(buf))) => {
initial_state_chan
.send(SyncMsg::NewState {
buffer: initial_buffer,
new_buf: buf,
done: None,
})
.unwrap();
}
Ok(Ok(None)) => (), // No initial state saved yet
Err(err) => error!("FIDL failed on initial response: {:?}", err),
Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
}
});
SyncStore { page, key, updates, buffer, transaction_pending: false }
}
/// Called whenever this app changed its own state and would like to
/// persist the changes to the ledger. Changes can't be committed
/// immediately since we have to wait for PageWatcher changes that may not
/// have arrived yet.
pub fn state_changed(&mut self) {
if!self.transaction_pending {
self.transaction_pending = true;
let ready_future = self.page.start_transaction();
let done_chan = self.updates.clone();
let buffer = self.buffer.clone();
ready_future.with(move |res| match res {
Ok(ledger::OK) => {
done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
}
Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
});
}
}
/// Should be called in SyncContainer::transaction_ready to persist the current state.
pub fn commit_transaction(&mut self, state: &Engine) {
assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
self.page.commit().with(ledger_crash_callback);
self.transaction_pending = false;
}
}
/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
NewState {
buffer: BufferIdentifier,
new_buf: Vec<u8>,
done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
},
TransactionReady {
buffer: BufferIdentifier,
},
/// Shut down the updater thread
Stop,
}
/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
container_ref: BufferContainerRef<W>,
chan: Receiver<SyncMsg>,
}
impl<W: Write + Send +'static> SyncUpdater<W> {
pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> {
SyncUpdater { container_ref, chan }
}
/// Run this in a thread, it will return when it encounters an error
/// reading the channel or when the `Stop` message is recieved.
pub fn work(&self) -> Result<(), RecvError> {
loop {
let msg = self.chan.recv()?;
match msg {
SyncMsg::Stop => return Ok(()),
SyncMsg::TransactionReady { buffer } => {
let mut container = self.container_ref.lock();
// if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
editor.transaction_ready();
}
}
SyncMsg::NewState { new_buf, done, buffer } => {
let mut container = self.container_ref.lock();
match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
(Some(mut editor), Ok(new_state)) => {
editor.merge_new_state(new_state);
if let Some(promise) = done {
promise.set_ok(None);
}
}
(None, _) => (), // buffer was closed
(_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
}
}
}
}
}
}
struct PageWatcherServer {
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
}
impl PageWatcher for PageWatcherServer {
fn on_change(
&mut self,
page_change: PageChange,
result_state: ResultState,
) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
let (future, done) = Future::make_promise();
let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
self.updates
.send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
.unwrap();
} else {
error!("Xi state corrupted, should have one key but has multiple.");
// I don't think this should be a FIDL-level error, so set okay
done.set_ok(None);
}
future
}
}
impl PageWatcher_Stub for PageWatcherServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);
// ============= Conflict resolution
pub fn | (ledger: &mut Ledger_Proxy, key: Vec<u8>) {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
let resolver_client_ptr = ::fidl::InterfacePtr {
inner: resolver_client,
version: ConflictResolverFactory_Metadata::VERSION,
};
let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();
ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}
struct ConflictResolverFactoryServer {
key: Vec<u8>,
}
impl ConflictResolverFactory for ConflictResolverFactoryServer {
fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
Future::done(Ok(MergePolicy_Custom))
}
/// Our resolvers are the same for every page
fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
let _ = fidl::Server::new(
ConflictResolverServer { key: self.key.clone() },
resolver.into_channel(),
)
.spawn();
}
}
impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub);
fn state_from_snapshot<F>(
snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>,
key: Vec<u8>,
done: F,
) where
F: Send + FnOnce(Result<Option<Engine>, ()>) +'static,
{
assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version);
let mut snapshot_proxy = PageSnapshot_new_Proxy(snapshot.inner);
// TODO get a reference when too big
snapshot_proxy.get(key).with(move |raw_res| {
let state = match raw_res.map(|res| ledger::value_result(res)) {
// the.ok() has the behavior of acting like invalid state is empty
// and thus deleting invalid state and overwriting it with good state
Ok(Ok(Some(buf))) => Ok(buf_to_state(&buf).ok()),
Ok(Ok(None)) => {
info!("No state in conflicting page");
Ok(None)
}
Err(err) => {
warn!("FIDL failed on initial response: {:?}", err);
Err(())
}
Ok(Err(err)) => {
warn!("Ledger failed to retrieve key: {:?}", err);
Err(())
}
};
done(state);
});
}
struct ConflictResolverServer {
key: Vec<u8>,
}
impl ConflictResolver for ConflictResolverServer {
fn resolve(
&mut self,
left: ::fidl::InterfacePtr<PageSnapshot_Client>,
right: ::fidl::InterfacePtr<PageSnapshot_Client>,
_common_version: Option<::fidl::InterfacePtr<PageSnapshot_Client>>,
result_provider: ::fidl::InterfacePtr<MergeResultProvider_Client>,
) {
// TODO in the futures-rs future, do this in parallel with Future combinators
let key2 = self.key.clone();
state_from_snapshot(left, self.key.clone(), move |e1_opt| {
let key3 = key2.clone();
state_from_snapshot(right, key2, move |e2_opt| {
let result_opt = match (e1_opt, e2_opt) {
(Ok(Some(mut e1)), Ok(Some(e2))) => {
e1.merge(&e2);
Some(e1)
}
// one engine didn't exist yet, I'm not sure if Ledger actually generates a conflict in this case
(Ok(Some(e)), Ok(None)) | (Ok(None), Ok(Some(e))) => Some(e),
// failed to get one of the engines, we can't do the merge properly
(Err(()), _) | (_, Err(())) => None,
// if state is invalid or missing on both sides, can't merge
(Ok(None), Ok(None)) => None,
};
if let Some(out_state) = result_opt {
let buf = state_to_buf(&out_state);
// TODO use a reference here when buf is too big
let new_value = Some(Box::new(BytesOrReference::Bytes(buf)));
let merged = MergedValue {
key: key3,
source: ValueSource_New,
new_value,
priority: Priority_Eager,
};
assert_eq!(MergeResultProvider_Metadata::VERSION, result_provider.version);
let mut result_provider_proxy =
MergeResultProvider_new_Proxy(result_provider.inner);
result_provider_proxy.merge(vec![merged]);
result_provider_proxy.done().with(ledger_crash_callback);
}
});
});
}
}
impl ConflictResolver_Stub for ConflictResolverServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverServer: ConflictResolver_Stub);
| start_conflict_resolver_factory | identifier_name |
sync.rs | // Copyright 2017 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Architecture for synchronizing a CRDT with the ledger. Separated into a
//! module so that it is easier to add other sync stores later.
use std::io::Write;
use std::sync::mpsc::{Receiver, RecvError, Sender};
use log;
use apps_ledger_services_public::*;
use fidl::{self, Future, Promise};
use fuchsia::read_entire_vmo;
use magenta::{Channel, ChannelOpts, HandleBase};
use serde_json;
use super::ledger::{self, ledger_crash_callback};
use tabs::{BufferContainerRef, BufferIdentifier};
use xi_rope::engine::Engine;
// TODO switch these to bincode
fn state_to_buf(state: &Engine) -> Vec<u8> {
serde_json::to_vec(state).unwrap()
}
fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> {
serde_json::from_slice(buf)
}
/// Stores state needed by the container to perform synchronization.
pub struct SyncStore {
page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
transaction_pending: bool,
buffer: BufferIdentifier,
}
impl SyncStore {
/// - `page` is a reference to the Ledger page to store data under.
/// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under.
/// This example only supports storing things under a single key per page.
/// - `updates` is a channel to a `SyncUpdater` that will handle events.
///
/// Returns a sync store and schedules the loading of initial
/// state and subscribes to state updates for this document.
pub fn new(
mut page: Page_Proxy,
key: Vec<u8>,
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
) -> SyncStore {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap(); | let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
let _ = fidl::Server::new(watcher, s2).spawn();
let (mut snap, snap_request) = PageSnapshot_new_pair();
page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
.with(ledger_crash_callback);
let initial_state_chan = updates.clone();
let initial_buffer = buffer.clone();
snap.get(key.clone()).with(move |raw_res| {
match raw_res.map(|res| ledger::value_result(res)) {
Ok(Ok(Some(buf))) => {
initial_state_chan
.send(SyncMsg::NewState {
buffer: initial_buffer,
new_buf: buf,
done: None,
})
.unwrap();
}
Ok(Ok(None)) => (), // No initial state saved yet
Err(err) => error!("FIDL failed on initial response: {:?}", err),
Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
}
});
SyncStore { page, key, updates, buffer, transaction_pending: false }
}
/// Called whenever this app changed its own state and would like to
/// persist the changes to the ledger. Changes can't be committed
/// immediately since we have to wait for PageWatcher changes that may not
/// have arrived yet.
pub fn state_changed(&mut self) {
if!self.transaction_pending {
self.transaction_pending = true;
let ready_future = self.page.start_transaction();
let done_chan = self.updates.clone();
let buffer = self.buffer.clone();
ready_future.with(move |res| match res {
Ok(ledger::OK) => {
done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
}
Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
});
}
}
/// Should be called in SyncContainer::transaction_ready to persist the current state.
pub fn commit_transaction(&mut self, state: &Engine) {
assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
self.page.commit().with(ledger_crash_callback);
self.transaction_pending = false;
}
}
/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
NewState {
buffer: BufferIdentifier,
new_buf: Vec<u8>,
done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
},
TransactionReady {
buffer: BufferIdentifier,
},
/// Shut down the updater thread
Stop,
}
/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
container_ref: BufferContainerRef<W>,
chan: Receiver<SyncMsg>,
}
impl<W: Write + Send +'static> SyncUpdater<W> {
pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> {
SyncUpdater { container_ref, chan }
}
/// Run this in a thread, it will return when it encounters an error
/// reading the channel or when the `Stop` message is recieved.
pub fn work(&self) -> Result<(), RecvError> {
loop {
let msg = self.chan.recv()?;
match msg {
SyncMsg::Stop => return Ok(()),
SyncMsg::TransactionReady { buffer } => {
let mut container = self.container_ref.lock();
// if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
editor.transaction_ready();
}
}
SyncMsg::NewState { new_buf, done, buffer } => {
let mut container = self.container_ref.lock();
match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
(Some(mut editor), Ok(new_state)) => {
editor.merge_new_state(new_state);
if let Some(promise) = done {
promise.set_ok(None);
}
}
(None, _) => (), // buffer was closed
(_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
}
}
}
}
}
}
struct PageWatcherServer {
updates: Sender<SyncMsg>,
buffer: BufferIdentifier,
}
impl PageWatcher for PageWatcherServer {
fn on_change(
&mut self,
page_change: PageChange,
result_state: ResultState,
) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
let (future, done) = Future::make_promise();
let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
self.updates
.send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
.unwrap();
} else {
error!("Xi state corrupted, should have one key but has multiple.");
// I don't think this should be a FIDL-level error, so set okay
done.set_ok(None);
}
future
}
}
impl PageWatcher_Stub for PageWatcherServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);
// ============= Conflict resolution
pub fn start_conflict_resolver_factory(ledger: &mut Ledger_Proxy, key: Vec<u8>) {
let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
let resolver_client_ptr = ::fidl::InterfacePtr {
inner: resolver_client,
version: ConflictResolverFactory_Metadata::VERSION,
};
let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();
ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}
struct ConflictResolverFactoryServer {
key: Vec<u8>,
}
impl ConflictResolverFactory for ConflictResolverFactoryServer {
fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
Future::done(Ok(MergePolicy_Custom))
}
/// Our resolvers are the same for every page
fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
let _ = fidl::Server::new(
ConflictResolverServer { key: self.key.clone() },
resolver.into_channel(),
)
.spawn();
}
}
impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub);
fn state_from_snapshot<F>(
snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>,
key: Vec<u8>,
done: F,
) where
F: Send + FnOnce(Result<Option<Engine>, ()>) +'static,
{
assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version);
let mut snapshot_proxy = PageSnapshot_new_Proxy(snapshot.inner);
// TODO get a reference when too big
snapshot_proxy.get(key).with(move |raw_res| {
let state = match raw_res.map(|res| ledger::value_result(res)) {
// the.ok() has the behavior of acting like invalid state is empty
// and thus deleting invalid state and overwriting it with good state
Ok(Ok(Some(buf))) => Ok(buf_to_state(&buf).ok()),
Ok(Ok(None)) => {
info!("No state in conflicting page");
Ok(None)
}
Err(err) => {
warn!("FIDL failed on initial response: {:?}", err);
Err(())
}
Ok(Err(err)) => {
warn!("Ledger failed to retrieve key: {:?}", err);
Err(())
}
};
done(state);
});
}
struct ConflictResolverServer {
key: Vec<u8>,
}
impl ConflictResolver for ConflictResolverServer {
fn resolve(
&mut self,
left: ::fidl::InterfacePtr<PageSnapshot_Client>,
right: ::fidl::InterfacePtr<PageSnapshot_Client>,
_common_version: Option<::fidl::InterfacePtr<PageSnapshot_Client>>,
result_provider: ::fidl::InterfacePtr<MergeResultProvider_Client>,
) {
// TODO in the futures-rs future, do this in parallel with Future combinators
let key2 = self.key.clone();
state_from_snapshot(left, self.key.clone(), move |e1_opt| {
let key3 = key2.clone();
state_from_snapshot(right, key2, move |e2_opt| {
let result_opt = match (e1_opt, e2_opt) {
(Ok(Some(mut e1)), Ok(Some(e2))) => {
e1.merge(&e2);
Some(e1)
}
// one engine didn't exist yet, I'm not sure if Ledger actually generates a conflict in this case
(Ok(Some(e)), Ok(None)) | (Ok(None), Ok(Some(e))) => Some(e),
// failed to get one of the engines, we can't do the merge properly
(Err(()), _) | (_, Err(())) => None,
// if state is invalid or missing on both sides, can't merge
(Ok(None), Ok(None)) => None,
};
if let Some(out_state) = result_opt {
let buf = state_to_buf(&out_state);
// TODO use a reference here when buf is too big
let new_value = Some(Box::new(BytesOrReference::Bytes(buf)));
let merged = MergedValue {
key: key3,
source: ValueSource_New,
new_value,
priority: Priority_Eager,
};
assert_eq!(MergeResultProvider_Metadata::VERSION, result_provider.version);
let mut result_provider_proxy =
MergeResultProvider_new_Proxy(result_provider.inner);
result_provider_proxy.merge(vec![merged]);
result_provider_proxy.done().with(ledger_crash_callback);
}
});
});
}
}
impl ConflictResolver_Stub for ConflictResolverServer {
// Use default dispatching, but we could override it here.
}
impl_fidl_stub!(ConflictResolverServer: ConflictResolver_Stub); | let watcher_client = PageWatcher_Client::from_handle(s1.into_handle());
let watcher_client_ptr =
::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
| random_line_split |
lib.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports multi-stage configuration process made with safety in mind. It involves
//! 4 steps (or stages) and allows to configure and run multiple blockchain nodes without
//! need in exchanging private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate common (template) part of the nodes configuration using `generate-template` command.
//! Generated `.toml` file must be spread among all the nodes and must be used in the following
//! configuration step.
//! 2. Generate public and secret (private) parts of the node configuration using `generate-config`
//! command. At this step, Exonum will generate master key from which consensus and service
//! validator keys are derived. Master key is stored in the encrypted file. Consensus secret key
//! is used for communications between the nodes, while service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be only accessible by the node administrator only.
//! 3. Generate final node configuration using `finalize` command. Exonum combines secret part of
//! the node configuration with public configurations of every other node, producing a single
//! configuration file with all the necessary node and network settings.
//! 4. Use `run` command and provide it with final node configuration file produced at the previous
//! step. If the secret keys are protected with passwords, the user need to enter the password.
//! Running node will automatically connect to other nodes in the network using IP addresses from
//! public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * `run-dev` command automatically generates network configuration with a single node and runs
//! it. This command can be useful for fast testing of the services during development process.
//! * `maintenance` command allows to clear node's consensus messages with `clear-cache`, and
//! restart node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows to extend the list of the parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, you need to implement a structure with a list of
//! additional parameters and use `flatten` macro attribute of [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//! default: Run
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//! #[structopt(name = "run")
//! DefaultRun(Run),
//! #[structopt(name = "my-run")
//! MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> {
let temp_dir = TempDir::new()?;
let mut this = Self::with_args(vec![
OsString::from("run-dev"),
OsString::from("--artifacts-dir"),
temp_dir.path().into(),
]);
this.temp_dir = Some(temp_dir);
Ok(this)
}
/// Adds new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command lead to the node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args | else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder = node_builder.with_runtime(runtime);
}
Ok(Some(node_builder.build()))
} else {
Ok(None)
}
}
/// Configures the node using parameters provided by user from stdin and then runs it.
pub fn run(mut self) -> Result<(), failure::Error> {
// Store temporary directory until the node is done.
let _temp_dir = self.temp_dir.take();
if let Some(node) = self.execute_command()? {
node.run()
} else {
Ok(())
}
}
fn genesis_config(
run_config: &NodeRunConfig,
default_instances: Vec<InstanceInitParams>,
) -> GenesisConfig {
let mut builder = GenesisConfigBuilder::with_consensus_config(
run_config.node_config.public_config.consensus.clone(),
);
// Add builtin services to genesis config.
builder = builder
.with_artifact(Supervisor.artifact_id())
.with_instance(Self::supervisor_service(&run_config))
.with_artifact(ExplorerFactory.artifact_id())
.with_instance(ExplorerFactory.default_instance());
// Add default instances.
for instance in default_instances {
builder = builder
.with_artifact(instance.instance_spec.artifact.clone())
.with_instance(instance)
}
builder.build()
}
fn supervisor_service(run_config: &NodeRunConfig) -> InstanceInitParams {
let mode = run_config
.node_config
.public_config
.general
.supervisor_mode
.clone();
Supervisor::builtin_instance(SupervisorConfig { mode })
}
}
| {
Command::from_iter(args)
} | conditional_block |
lib.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports multi-stage configuration process made with safety in mind. It involves
//! 4 steps (or stages) and allows to configure and run multiple blockchain nodes without
//! need in exchanging private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate common (template) part of the nodes configuration using `generate-template` command.
//! Generated `.toml` file must be spread among all the nodes and must be used in the following
//! configuration step.
//! 2. Generate public and secret (private) parts of the node configuration using `generate-config`
//! command. At this step, Exonum will generate master key from which consensus and service
//! validator keys are derived. Master key is stored in the encrypted file. Consensus secret key
//! is used for communications between the nodes, while service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be only accessible by the node administrator only.
//! 3. Generate final node configuration using `finalize` command. Exonum combines secret part of
//! the node configuration with public configurations of every other node, producing a single
//! configuration file with all the necessary node and network settings.
//! 4. Use `run` command and provide it with final node configuration file produced at the previous
//! step. If the secret keys are protected with passwords, the user need to enter the password.
//! Running node will automatically connect to other nodes in the network using IP addresses from
//! public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * `run-dev` command automatically generates network configuration with a single node and runs
//! it. This command can be useful for fast testing of the services during development process.
//! * `maintenance` command allows to clear node's consensus messages with `clear-cache`, and
//! restart node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows to extend the list of the parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, you need to implement a structure with a list of
//! additional parameters and use `flatten` macro attribute of [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//! default: Run
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//! #[structopt(name = "run")
//! DefaultRun(Run),
//! #[structopt(name = "my-run")
//! MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> |
/// Adds new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command lead to the node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args {
Command::from_iter(args)
} else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder = node_builder.with_runtime(runtime);
}
Ok(Some(node_builder.build()))
} else {
Ok(None)
}
}
/// Configures the node using parameters provided by user from stdin and then runs it.
pub fn run(mut self) -> Result<(), failure::Error> {
// Store temporary directory until the node is done.
let _temp_dir = self.temp_dir.take();
if let Some(node) = self.execute_command()? {
node.run()
} else {
Ok(())
}
}
fn genesis_config(
run_config: &NodeRunConfig,
default_instances: Vec<InstanceInitParams>,
) -> GenesisConfig {
let mut builder = GenesisConfigBuilder::with_consensus_config(
run_config.node_config.public_config.consensus.clone(),
);
// Add builtin services to genesis config.
builder = builder
.with_artifact(Supervisor.artifact_id())
.with_instance(Self::supervisor_service(&run_config))
.with_artifact(ExplorerFactory.artifact_id())
.with_instance(ExplorerFactory.default_instance());
// Add default instances.
for instance in default_instances {
builder = builder
.with_artifact(instance.instance_spec.artifact.clone())
.with_instance(instance)
}
builder.build()
}
fn supervisor_service(run_config: &NodeRunConfig) -> InstanceInitParams {
let mode = run_config
.node_config
.public_config
.general
.supervisor_mode
.clone();
Supervisor::builtin_instance(SupervisorConfig { mode })
}
}
| {
let temp_dir = TempDir::new()?;
let mut this = Self::with_args(vec![
OsString::from("run-dev"),
OsString::from("--artifacts-dir"),
temp_dir.path().into(),
]);
this.temp_dir = Some(temp_dir);
Ok(this)
} | identifier_body |
lib.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports multi-stage configuration process made with safety in mind. It involves
//! 4 steps (or stages) and allows to configure and run multiple blockchain nodes without
//! need in exchanging private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate common (template) part of the nodes configuration using `generate-template` command.
//! Generated `.toml` file must be spread among all the nodes and must be used in the following
//! configuration step.
//! 2. Generate public and secret (private) parts of the node configuration using `generate-config`
//! command. At this step, Exonum will generate master key from which consensus and service
//! validator keys are derived. Master key is stored in the encrypted file. Consensus secret key
//! is used for communications between the nodes, while service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be only accessible by the node administrator only.
//! 3. Generate final node configuration using `finalize` command. Exonum combines secret part of
//! the node configuration with public configurations of every other node, producing a single
//! configuration file with all the necessary node and network settings.
//! 4. Use `run` command and provide it with final node configuration file produced at the previous
//! step. If the secret keys are protected with passwords, the user need to enter the password.
//! Running node will automatically connect to other nodes in the network using IP addresses from
//! public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * `run-dev` command automatically generates network configuration with a single node and runs
//! it. This command can be useful for fast testing of the services during development process.
//! * `maintenance` command allows to clear node's consensus messages with `clear-cache`, and
//! restart node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows to extend the list of the parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, you need to implement a structure with a list of
//! additional parameters and use `flatten` macro attribute of [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//! default: Run
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//! #[structopt(name = "run")
//! DefaultRun(Run),
//! #[structopt(name = "my-run")
//! MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt; |
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> {
let temp_dir = TempDir::new()?;
let mut this = Self::with_args(vec![
OsString::from("run-dev"),
OsString::from("--artifacts-dir"),
temp_dir.path().into(),
]);
this.temp_dir = Some(temp_dir);
Ok(this)
}
/// Adds new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command lead to the node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args {
Command::from_iter(args)
} else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder = node_builder.with_runtime(runtime);
}
Ok(Some(node_builder.build()))
} else {
Ok(None)
}
}
/// Configures the node using parameters provided by user from stdin and then runs it.
pub fn run(mut self) -> Result<(), failure::Error> {
// Store temporary directory until the node is done.
let _temp_dir = self.temp_dir.take();
if let Some(node) = self.execute_command()? {
node.run()
} else {
Ok(())
}
}
fn genesis_config(
run_config: &NodeRunConfig,
default_instances: Vec<InstanceInitParams>,
) -> GenesisConfig {
let mut builder = GenesisConfigBuilder::with_consensus_config(
run_config.node_config.public_config.consensus.clone(),
);
// Add builtin services to genesis config.
builder = builder
.with_artifact(Supervisor.artifact_id())
.with_instance(Self::supervisor_service(&run_config))
.with_artifact(ExplorerFactory.artifact_id())
.with_instance(ExplorerFactory.default_instance());
// Add default instances.
for instance in default_instances {
builder = builder
.with_artifact(instance.instance_spec.artifact.clone())
.with_instance(instance)
}
builder.build()
}
fn supervisor_service(run_config: &NodeRunConfig) -> InstanceInitParams {
let mode = run_config
.node_config
.public_config
.general
.supervisor_mode
.clone();
Supervisor::builtin_instance(SupervisorConfig { mode })
}
} | use tempfile::TempDir; | random_line_split |
lib.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports multi-stage configuration process made with safety in mind. It involves
//! 4 steps (or stages) and allows to configure and run multiple blockchain nodes without
//! need in exchanging private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate common (template) part of the nodes configuration using `generate-template` command.
//! Generated `.toml` file must be spread among all the nodes and must be used in the following
//! configuration step.
//! 2. Generate public and secret (private) parts of the node configuration using `generate-config`
//! command. At this step, Exonum will generate master key from which consensus and service
//! validator keys are derived. Master key is stored in the encrypted file. Consensus secret key
//! is used for communications between the nodes, while service secret key is used
//! mainly to sign transactions generated by the node. Both secret keys may be encrypted with a
//! password. The public part of the node configuration must be spread among all nodes, while the
//! secret part must be only accessible by the node administrator only.
//! 3. Generate final node configuration using `finalize` command. Exonum combines secret part of
//! the node configuration with public configurations of every other node, producing a single
//! configuration file with all the necessary node and network settings.
//! 4. Use `run` command and provide it with final node configuration file produced at the previous
//! step. If the secret keys are protected with passwords, the user need to enter the password.
//! Running node will automatically connect to other nodes in the network using IP addresses from
//! public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * `run-dev` command automatically generates network configuration with a single node and runs
//! it. This command can be useful for fast testing of the services during development process.
//! * `maintenance` command allows to clear node's consensus messages with `clear-cache`, and
//! restart node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows to extend the list of the parameters for any command and even add new CLI
//! commands with arbitrary behavior. To do so, you need to implement a structure with a list of
//! additional parameters and use `flatten` macro attribute of [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//! #[serde(flatten)]
//! #[structopt(flatten)]
//! default: Run
//! /// My awesome parameter
//! secret_number: i32
//! }
//! ```
//!
//! You can also create own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//! #[structopt(name = "run")
//! DefaultRun(Run),
//! #[structopt(name = "my-run")
//! MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt
#![deny(missing_docs)]
pub use crate::config_manager::DefaultConfigManager;
pub use structopt;
use exonum::{
blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
merkledb::RocksDB,
runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;
use std::{env, ffi::OsString, iter, path::PathBuf};
use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};
pub mod command;
pub mod config;
pub mod io;
pub mod password;
mod config_manager;
/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
rust_runtime: RustRuntimeBuilder,
external_runtimes: Vec<RuntimeInstance>,
builtin_instances: Vec<InstanceInitParams>,
args: Option<Vec<OsString>>,
temp_dir: Option<TempDir>,
}
impl Default for NodeBuilder {
fn default() -> Self {
Self::new()
}
}
impl NodeBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {
rust_runtime: RustRuntimeBuilder::new()
.with_factory(Supervisor)
.with_factory(ExplorerFactory),
external_runtimes: vec![],
builtin_instances: vec![],
args: None,
temp_dir: None,
}
}
/// Creates a new builder with the provided command-line arguments. The path
/// to the current executable **does not** need to be specified as the first argument.
#[doc(hidden)] // unstable
pub fn with_args<I>(args: I) -> Self
where
I: IntoIterator,
I::Item: Into<OsString>,
{
let mut this = Self::new();
let executable = env::current_exe()
.map(PathBuf::into_os_string)
.unwrap_or_else(|_| "node".into());
let all_args = iter::once(executable)
.chain(args.into_iter().map(Into::into))
.collect();
this.args = Some(all_args);
this
}
/// Creates a single-node development network with default settings. The node stores
/// its data in a temporary directory, which is automatically removed when the node is stopped.
///
/// # Return value
///
/// Returns an error if the temporary directory cannot be created.
pub fn development_node() -> Result<Self, failure::Error> {
let temp_dir = TempDir::new()?;
let mut this = Self::with_args(vec![
OsString::from("run-dev"),
OsString::from("--artifacts-dir"),
temp_dir.path().into(),
]);
this.temp_dir = Some(temp_dir);
Ok(this)
}
/// Adds new Rust service to the list of available services.
pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
self.rust_runtime = self.rust_runtime.with_factory(service);
self
}
/// Adds a new `Runtime` to the list of available runtimes.
///
/// Note that you don't have to add the Rust runtime, since it is included by default.
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self {
self.external_runtimes.push(runtime.into());
self
}
/// Adds a service instance that will be available immediately after creating a genesis block.
///
/// For Rust services, the service factory needs to be separately supplied
/// via [`with_rust_service`](#method.with_rust_service).
pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self {
self.builtin_instances.push(instance.into());
self
}
/// Adds a default Rust service instance that will be available immediately after creating a
/// genesis block.
pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self {
self.with_instance(service.default_instance())
.with_rust_service(service)
}
/// Executes a command received from the command line.
///
/// # Return value
///
/// Returns:
///
/// - `Ok(Some(_))` if the command lead to the node creation
/// - `Ok(None)` if the command executed successfully and did not lead to node creation
/// - `Err(_)` if an error occurred during command execution
#[doc(hidden)] // unstable
pub fn execute_command(self) -> Result<Option<Node>, failure::Error> {
let command = if let Some(args) = self.args {
Command::from_iter(args)
} else {
Command::from_args()
};
if let StandardResult::Run(run_config) = command.execute()? {
let genesis_config = Self::genesis_config(&run_config, self.builtin_instances);
let db_options = &run_config.node_config.private_config.database;
let database = RocksDB::open(run_config.db_path, db_options)?;
let node_config_path = run_config.node_config_path.to_string_lossy();
let config_manager = DefaultConfigManager::new(node_config_path.into_owned());
let rust_runtime = self.rust_runtime;
let node_config = run_config.node_config.into();
let node_keys = run_config.node_keys;
let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys)
.with_genesis_config(genesis_config)
.with_config_manager(config_manager)
.with_plugin(SystemApiPlugin)
.with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender()));
for runtime in self.external_runtimes {
node_builder = node_builder.with_runtime(runtime);
}
Ok(Some(node_builder.build()))
} else {
Ok(None)
}
}
/// Configures the node using parameters provided by user from stdin and then runs it.
pub fn run(mut self) -> Result<(), failure::Error> {
// Store temporary directory until the node is done.
let _temp_dir = self.temp_dir.take();
if let Some(node) = self.execute_command()? {
node.run()
} else {
Ok(())
}
}
fn genesis_config(
run_config: &NodeRunConfig,
default_instances: Vec<InstanceInitParams>,
) -> GenesisConfig {
let mut builder = GenesisConfigBuilder::with_consensus_config(
run_config.node_config.public_config.consensus.clone(),
);
// Add builtin services to genesis config.
builder = builder
.with_artifact(Supervisor.artifact_id())
.with_instance(Self::supervisor_service(&run_config))
.with_artifact(ExplorerFactory.artifact_id())
.with_instance(ExplorerFactory.default_instance());
// Add default instances.
for instance in default_instances {
builder = builder
.with_artifact(instance.instance_spec.artifact.clone())
.with_instance(instance)
}
builder.build()
}
fn | (run_config: &NodeRunConfig) -> InstanceInitParams {
let mode = run_config
.node_config
.public_config
.general
.supervisor_mode
.clone();
Supervisor::builtin_instance(SupervisorConfig { mode })
}
}
| supervisor_service | identifier_name |
main.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Example of a very simple runtime that can perform two types of transaction:
//! increment and reset counter in the service instance.
#![allow(clippy::unnecessary_wraps)]
use exonum::{
blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys},
helpers::Height,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService {
_name: instance.name.to_owned(),
..SampleService::default()
})
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver |
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
}
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if!payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
.get_instance(instance_name)
.unwrap();
assert_eq!(state.status.unwrap(), InstanceStatus::Active);
let instance_id = state.spec.id;
// Send an update counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes());
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
// Send a reset counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]);
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
shutdown_handle.shutdown().await.unwrap();
}
#[tokio::main]
async fn main() {
exonum::helpers::init_logger().unwrap();
println!("Creating database in temporary dir...");
let db = TemporaryDB::new();
let (node_cfg, node_keys) = node_config();
let consensus_config = node_cfg.consensus.clone();
let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config);
let mut rt = RustRuntime::builder();
Supervisor::simple().deploy(&mut genesis_config, &mut rt);
println!("Creating blockchain with additional runtime...");
let node = NodeBuilder::new(db, node_cfg, node_keys)
.with_genesis_config(genesis_config.build())
.with_runtime(SampleRuntime::default())
.with_runtime_fn(|channel| {
RustRuntime::builder()
.with_factory(Supervisor)
.build(channel.endpoints_sender())
})
.build();
let shutdown_handle = node.shutdown_handle();
println!("Starting a single node...");
println!("Blockchain is ready for transactions!");
let blockchain = node.blockchain().clone();
let node_task = node.run().unwrap_or_else(|e| panic!("{}", e));
let node_task = tokio::spawn(node_task);
examine_runtime(blockchain, shutdown_handle).await;
node_task.await.unwrap();
}
| {
Receiver::with_result(self.deploy_artifact(artifact, spec))
} | identifier_body |
main.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Example of a very simple runtime that can perform two types of transaction:
//! increment and reset counter in the service instance.
#![allow(clippy::unnecessary_wraps)]
use exonum::{
blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys},
helpers::Height,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService {
_name: instance.name.to_owned(),
..SampleService::default()
})
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver {
Receiver::with_result(self.deploy_artifact(artifact, spec))
}
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => |
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if!payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
.get_instance(instance_name)
.unwrap();
assert_eq!(state.status.unwrap(), InstanceStatus::Active);
let instance_id = state.spec.id;
// Send an update counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes());
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
// Send a reset counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]);
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
shutdown_handle.shutdown().await.unwrap();
}
#[tokio::main]
async fn main() {
exonum::helpers::init_logger().unwrap();
println!("Creating database in temporary dir...");
let db = TemporaryDB::new();
let (node_cfg, node_keys) = node_config();
let consensus_config = node_cfg.consensus.clone();
let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config);
let mut rt = RustRuntime::builder();
Supervisor::simple().deploy(&mut genesis_config, &mut rt);
println!("Creating blockchain with additional runtime...");
let node = NodeBuilder::new(db, node_cfg, node_keys)
.with_genesis_config(genesis_config.build())
.with_runtime(SampleRuntime::default())
.with_runtime_fn(|channel| {
RustRuntime::builder()
.with_factory(Supervisor)
.build(channel.endpoints_sender())
})
.build();
let shutdown_handle = node.shutdown_handle();
println!("Starting a single node...");
println!("Blockchain is ready for transactions!");
let blockchain = node.blockchain().clone();
let node_task = node.run().unwrap_or_else(|e| panic!("{}", e));
let node_task = tokio::spawn(node_task);
examine_runtime(blockchain, shutdown_handle).await;
node_task.await.unwrap();
}
| {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
} | conditional_block |
main.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Example of a very simple runtime that can perform two types of transaction:
//! increment and reset counter in the service instance.
#![allow(clippy::unnecessary_wraps)]
use exonum::{
blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys},
helpers::Height,
keys::Keys,
merkledb::{BinaryValue, Snapshot, TemporaryDB},
runtime::{
migrations::{InitMigrationError, MigrationScript},
oneshot::Receiver,
versioning::Version,
AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail,
InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime,
SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID,
},
};
use exonum_derive::ExecutionFail;
use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle};
use exonum_rust_runtime::{spec::Deploy, RustRuntime};
use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface};
use futures::TryFutureExt;
use std::{cell::Cell, collections::BTreeMap, thread, time::Duration};
/// Service instance with a counter.
#[derive(Debug, Default)]
struct SampleService {
counter: Cell<u64>,
_name: String,
}
/// Sample runtime.
#[derive(Debug, Default)]
struct SampleRuntime {
deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>,
started_services: BTreeMap<InstanceId, SampleService>,
}
// Define runtime specific errors.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(ExecutionFail)]
#[execution_fail(kind = "runtime")]
enum SampleRuntimeError {
/// Incorrect information to call transaction.
IncorrectCallInfo = 1,
/// Incorrect transaction payload.
IncorrectPayload = 2,
}
impl SampleRuntime {
/// Create a new service instance with the given specification.
fn start_service(
&self,
artifact: &ArtifactId,
instance: &InstanceDescriptor,
) -> Result<SampleService, ExecutionError> {
// Invariants guaranteed by the core.
assert!(self.deployed_artifacts.contains_key(artifact));
assert!(!self.started_services.contains_key(&instance.id));
Ok(SampleService { | })
}
/// In the present simplest case, the artifact is added into the deployed artifacts table.
fn deploy_artifact(
&mut self,
artifact: ArtifactId,
spec: Vec<u8>,
) -> Result<(), ExecutionError> {
// Invariant guaranteed by the core
assert!(!self.deployed_artifacts.contains_key(&artifact));
println!("Deploying artifact: {}", &artifact);
self.deployed_artifacts.insert(artifact, spec);
Ok(())
}
}
impl Runtime for SampleRuntime {
fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver {
Receiver::with_result(self.deploy_artifact(artifact, spec))
}
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool {
self.deployed_artifacts.contains_key(id)
}
/// Initiates adding a new service and sets the counter value for this.
fn initiate_adding_service(
&self,
context: ExecutionContext<'_>,
artifact: &ArtifactId,
params: Vec<u8>,
) -> Result<(), ExecutionError> {
let service_instance = self.start_service(artifact, context.instance())?;
let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?;
service_instance.counter.set(new_value);
println!(
"Initializing service {}: {} with value {}",
artifact,
context.instance(),
new_value
);
Ok(())
}
fn initiate_resuming_service(
&self,
_context: ExecutionContext<'_>,
_artifact: &ArtifactId,
_parameters: Vec<u8>,
) -> Result<(), ExecutionError> {
unreachable!("We don't resume services in this example.")
}
/// Commits status for the `SampleService` instance with the specified ID.
fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) {
let spec = &state.spec;
match state.status {
Some(InstanceStatus::Active) => {
// Unwrap here is safe, since by invocation of this method
// `exonum` guarantees that `initiate_adding_service` was invoked
// before and it returned `Ok(..)`.
let instance = self
.start_service(&spec.artifact, &spec.as_descriptor())
.unwrap();
println!("Starting service {}: {:?}", spec, instance);
self.started_services.insert(spec.id, instance);
}
Some(InstanceStatus::Stopped) => {
let instance = self.started_services.remove(&spec.id);
println!("Stopping service {}: {:?}", spec, instance);
}
_ => {
// We aren't interested in other possible statuses.
}
}
}
fn migrate(
&self,
_new_artifact: &ArtifactId,
_data_version: &Version,
) -> Result<Option<MigrationScript>, InitMigrationError> {
Err(InitMigrationError::NotSupported)
}
fn execute(
&self,
context: ExecutionContext<'_>,
method_id: MethodId,
payload: &[u8],
) -> Result<(), ExecutionError> {
let service = self
.started_services
.get(&context.instance().id)
.ok_or(SampleRuntimeError::IncorrectCallInfo)?;
println!(
"Executing method {}#{} of service {}",
context.interface_name(),
method_id,
context.instance().id
);
const SERVICE_INTERFACE: &str = "";
match (context.interface_name(), method_id) {
// Increment counter.
(SERVICE_INTERFACE, 0) => {
let value = u64::from_bytes(payload.into())
.map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?;
let counter = service.counter.get();
println!("Updating counter value to {}", counter + value);
service.counter.set(value + counter);
Ok(())
}
// Reset counter.
(SERVICE_INTERFACE, 1) => {
if!payload.is_empty() {
Err(SampleRuntimeError::IncorrectPayload.into())
} else {
println!("Resetting counter");
service.counter.set(0);
Ok(())
}
}
// Unknown transaction.
(interface, method) => {
let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!(
"Incorrect information to call transaction. {}#{}",
interface, method
));
Err(err)
}
}
}
fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> {
Ok(())
}
fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}
impl From<SampleRuntime> for (u32, Box<dyn Runtime>) {
fn from(inner: SampleRuntime) -> Self {
(SampleRuntime::ID, Box::new(inner))
}
}
impl WellKnownRuntime for SampleRuntime {
const ID: u32 = 255;
}
fn node_config() -> (NodeConfig, Keys) {
let keys = Keys::random();
let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())];
let consensus = ConsensusConfig::default().with_validator_keys(validator_keys);
let api_address = "0.0.0.0:8000".parse().unwrap();
let api_cfg = NodeApiConfig {
public_api_address: Some(api_address),
..Default::default()
};
let peer_address = "0.0.0.0:2000";
let node_config = NodeConfig {
listen_address: peer_address.parse().unwrap(),
consensus,
external_address: peer_address.to_owned(),
network: Default::default(),
connect_list: Default::default(),
api: api_cfg,
mempool: Default::default(),
thread_pool_size: Default::default(),
};
(node_config, keys)
}
async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) {
let service_keypair = blockchain.service_keypair();
let deploy_height = Height(50);
// Send an artifact `DeployRequest` to the sample runtime.
let artifact = "255:sample_artifact:0.1.0".parse().unwrap();
let request = DeployRequest::new(artifact, deploy_height);
let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
// Wait until the request is finished.
thread::sleep(Duration::from_secs(5));
// Send a `StartService` request to the sample runtime.
let instance_name = "instance";
let proposal = ConfigPropose::immediate(0).start_service(
"255:sample_artifact:0.1.0".parse().unwrap(),
instance_name,
10_u64,
);
let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal);
blockchain
.sender()
.broadcast_transaction(proposal)
.await
.unwrap();
// Wait until instance identifier is assigned.
thread::sleep(Duration::from_secs(1));
// Get an instance identifier.
let snapshot = blockchain.snapshot();
let state = snapshot
.for_dispatcher()
.get_instance(instance_name)
.unwrap();
assert_eq!(state.status.unwrap(), InstanceStatus::Active);
let instance_id = state.spec.id;
// Send an update counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes());
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
// Send a reset counter transaction.
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]);
let tx = tx.sign_with_keypair(service_keypair);
blockchain.sender().broadcast_transaction(tx).await.unwrap();
thread::sleep(Duration::from_secs(2));
shutdown_handle.shutdown().await.unwrap();
}
#[tokio::main]
async fn main() {
exonum::helpers::init_logger().unwrap();
println!("Creating database in temporary dir...");
let db = TemporaryDB::new();
let (node_cfg, node_keys) = node_config();
let consensus_config = node_cfg.consensus.clone();
let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config);
let mut rt = RustRuntime::builder();
Supervisor::simple().deploy(&mut genesis_config, &mut rt);
println!("Creating blockchain with additional runtime...");
let node = NodeBuilder::new(db, node_cfg, node_keys)
.with_genesis_config(genesis_config.build())
.with_runtime(SampleRuntime::default())
.with_runtime_fn(|channel| {
RustRuntime::builder()
.with_factory(Supervisor)
.build(channel.endpoints_sender())
})
.build();
let shutdown_handle = node.shutdown_handle();
println!("Starting a single node...");
println!("Blockchain is ready for transactions!");
let blockchain = node.blockchain().clone();
let node_task = node.run().unwrap_or_else(|e| panic!("{}", e));
let node_task = tokio::spawn(node_task);
examine_runtime(blockchain, shutdown_handle).await;
node_task.await.unwrap();
} | _name: instance.name.to_owned(),
..SampleService::default() | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.