file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
universal.rs | use crate::{script, Tag, Face, GlyphInfo, Mask, Script};
use crate::buffer::{Buffer, BufferFlags};
use crate::ot::{feature, FeatureFlags};
use crate::plan::{ShapePlan, ShapePlanner};
use crate::unicode::{CharExt, GeneralCategoryExt};
use super::*;
use super::arabic::ArabicShapePlan;
pub const UNIVERSAL_SHAPER: ComplexShaper = ComplexShaper {
collect_features: Some(collect_features),
override_features: None,
create_data: Some(|plan| Box::new(UniversalShapePlan::new(plan))),
preprocess_text: Some(preprocess_text),
postprocess_glyphs: None,
normalization_mode: Some(ShapeNormalizationMode::ComposedDiacriticsNoShortCircuit),
decompose: None,
compose: Some(compose),
setup_masks: Some(setup_masks),
gpos_tag: None,
reorder_marks: None,
zero_width_marks: Some(ZeroWidthMarksMode::ByGdefEarly),
fallback_position: false,
};
pub type Category = u8;
pub mod category {
pub const O: u8 = 0; // OTHER
pub const B: u8 = 1; // BASE
pub const IND: u8 = 3; // BASE_IND
pub const N: u8 = 4; // BASE_NUM
pub const GB: u8 = 5; // BASE_OTHER
pub const CGJ: u8 = 6; // CGJ
// pub const F: u8 = 7; // CONS_FINAL
pub const FM: u8 = 8; // CONS_FINAL_MOD
// pub const M: u8 = 9; // CONS_MED
// pub const CM: u8 = 10; // CONS_MOD
pub const SUB: u8 = 11; // CONS_SUB
pub const H: u8 = 12; // HALANT
pub const HN: u8 = 13; // HALANT_NUM
pub const ZWNJ: u8 = 14; // Zero width non-joiner
pub const ZWJ: u8 = 15; // Zero width joiner
pub const WJ: u8 = 16; // Word joiner
// pub const RSV: u8 = 17; // Reserved characters
pub const R: u8 = 18; // REPHA
pub const S: u8 = 19; // SYM
// pub const SM: u8 = 20; // SYM_MOD
pub const VS: u8 = 21; // VARIATION_SELECTOR
// pub const V: u8 = 36; // VOWEL
// pub const VM: u8 = 40; // VOWEL_MOD
pub const CS: u8 = 43; // CONS_WITH_STACKER
// https://github.com/harfbuzz/harfbuzz/issues/1102
pub const HVM: u8 = 44; // HALANT_OR_VOWEL_MODIFIER
pub const SK: u8 = 48; // SAKOT
pub const FABV: u8 = 24; // CONS_FINAL_ABOVE
pub const FBLW: u8 = 25; // CONS_FINAL_BELOW
pub const FPST: u8 = 26; // CONS_FINAL_POST
pub const MABV: u8 = 27; // CONS_MED_ABOVE
pub const MBLW: u8 = 28; // CONS_MED_BELOW
pub const MPST: u8 = 29; // CONS_MED_POST
pub const MPRE: u8 = 30; // CONS_MED_PRE
pub const CMABV: u8 = 31; // CONS_MOD_ABOVE
pub const CMBLW: u8 = 32; // CONS_MOD_BELOW
pub const VABV: u8 = 33; // VOWEL_ABOVE / VOWEL_ABOVE_BELOW / VOWEL_ABOVE_BELOW_POST / VOWEL_ABOVE_POST
pub const VBLW: u8 = 34; // VOWEL_BELOW / VOWEL_BELOW_POST
pub const VPST: u8 = 35; // VOWEL_POST UIPC = Right
pub const VPRE: u8 = 22; // VOWEL_PRE / VOWEL_PRE_ABOVE / VOWEL_PRE_ABOVE_POST / VOWEL_PRE_POST
pub const VMABV: u8 = 37; // VOWEL_MOD_ABOVE
pub const VMBLW: u8 = 38; // VOWEL_MOD_BELOW
pub const VMPST: u8 = 39; // VOWEL_MOD_POST
pub const VMPRE: u8 = 23; // VOWEL_MOD_PRE
pub const SMABV: u8 = 41; // SYM_MOD_ABOVE
pub const SMBLW: u8 = 42; // SYM_MOD_BELOW
pub const FMABV: u8 = 45; // CONS_FINAL_MOD UIPC = Top
pub const FMBLW: u8 = 46; // CONS_FINAL_MOD UIPC = Bottom
pub const FMPST: u8 = 47; // CONS_FINAL_MOD UIPC = Not_Applicable
}
// These features are applied all at once, before reordering.
const BASIC_FEATURES: &[Tag] = &[
feature::RAKAR_FORMS,
feature::ABOVE_BASE_FORMS,
feature::BELOW_BASE_FORMS,
feature::HALF_FORMS,
feature::POST_BASE_FORMS,
feature::VATTU_VARIANTS,
feature::CONJUNCT_FORMS,
];
const TOPOGRAPHICAL_FEATURES: &[Tag] = &[
feature::ISOLATED_FORMS,
feature::INITIAL_FORMS,
feature::MEDIAL_FORMS_1,
feature::TERMINAL_FORMS_1,
];
// Same order as use_topographical_features.
#[derive(Clone, Copy, PartialEq)]
enum JoiningForm {
Isolated = 0,
Initial,
Medial,
Terminal,
}
// These features are applied all at once, after reordering and clearing syllables.
const OTHER_FEATURES: &[Tag] = &[
feature::ABOVE_BASE_SUBSTITUTIONS,
feature::BELOW_BASE_SUBSTITUTIONS,
feature::HALANT_FORMS,
feature::PRE_BASE_SUBSTITUTIONS,
feature::POST_BASE_SUBSTITUTIONS,
];
impl GlyphInfo {
pub(crate) fn use_category(&self) -> Category {
let v: &[u8; 4] = bytemuck::cast_ref(&self.var2);
v[2]
}
fn set_use_category(&mut self, c: Category) {
let v: &mut [u8; 4] = bytemuck::cast_mut(&mut self.var2);
v[2] = c;
}
fn is_halant_use(&self) -> bool {
matches!(self.use_category(), category::H | category::HVM) &&!self.is_ligated()
}
}
struct UniversalShapePlan {
rphf_mask: Mask,
arabic_plan: Option<ArabicShapePlan>,
}
impl UniversalShapePlan {
fn new(plan: &ShapePlan) -> UniversalShapePlan {
let mut arabic_plan = None;
if plan.script.map_or(false, has_arabic_joining) {
arabic_plan = Some(super::arabic::ArabicShapePlan::new(plan));
}
UniversalShapePlan {
rphf_mask: plan.ot_map.one_mask(feature::REPH_FORMS),
arabic_plan,
}
}
}
fn collect_features(planner: &mut ShapePlanner) {
// Do this before any lookups have been applied.
planner.ot_map.add_gsub_pause(Some(setup_syllables));
// Default glyph pre-processing group
planner.ot_map.enable_feature(feature::LOCALIZED_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::GLYPH_COMPOSITION_DECOMPOSITION, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::NUKTA_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::AKHANDS, FeatureFlags::MANUAL_ZWJ, 1);
// Reordering group
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.add_feature(feature::REPH_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_rphf));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.enable_feature(feature::PRE_BASE_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_pref));
// Orthographic unit shaping group
for feature in BASIC_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::MANUAL_ZWJ, 1);
}
planner.ot_map.add_gsub_pause(Some(reorder));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_syllables));
// Topographical features
for feature in TOPOGRAPHICAL_FEATURES {
planner.ot_map.add_feature(*feature, FeatureFlags::empty(), 1);
}
planner.ot_map.add_gsub_pause(None);
// Standard typographic presentation
for feature in OTHER_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::empty(), 1);
}
}
fn setup_syllables(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::universal_machine::find_syllables(buffer);
foreach_syllable!(buffer, start, end, {
buffer.unsafe_to_break(start, end);
});
setup_rphf_mask(plan, buffer);
setup_topographical_masks(plan, buffer);
}
fn setup_rphf_mask(plan: &ShapePlan, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let limit = if buffer.info[start].use_category() == category::R {
1
} else {
core::cmp::min(3, end - start)
};
for i in start..start+limit {
buffer.info[i].mask |= mask;
}
start = end;
end = buffer.next_syllable(start);
}
}
fn setup_topographical_masks(plan: &ShapePlan, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let mut masks = [0; 4];
let mut all_masks = 0;
for i in 0..4 {
masks[i] = plan.ot_map.one_mask(TOPOGRAPHICAL_FEATURES[i]);
if masks[i] == plan.ot_map.global_mask() {
masks[i] = 0;
}
all_masks |= masks[i];
}
if all_masks == 0 {
return;
}
let other_masks =!all_masks;
let mut last_start = 0;
let mut last_form = None;
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let syllable = buffer.info[start].syllable() & 0x0F;
if syllable == SyllableType::IndependentCluster as u8 ||
syllable == SyllableType::SymbolCluster as u8 ||
syllable == SyllableType::NonCluster as u8
{
last_form = None;
} else {
let join = last_form == Some(JoiningForm::Terminal) || last_form == Some(JoiningForm::Isolated);
if join {
// Fixup previous syllable's form.
let form = if last_form == Some(JoiningForm::Terminal) {
JoiningForm::Medial
} else {
JoiningForm::Initial
};
for i in last_start..start {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
// Form for this syllable.
let form = if join { JoiningForm::Terminal } else { JoiningForm::Isolated };
last_form = Some(form);
for i in start..end {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
last_start = start;
start = end;
end = buffer.next_syllable(start);
}
}
fn record_rphf(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) | }
}
start = end;
end = buffer.next_syllable(start);
}
}
fn reorder(_: &ShapePlan, face: &Face, buffer: &mut Buffer) {
insert_dotted_circles(face, buffer);
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
reorder_syllable(start, end, buffer);
start = end;
end = buffer.next_syllable(start);
}
}
fn insert_dotted_circles(face: &Face, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
if buffer.flags.contains(BufferFlags::DO_NOT_INSERT_DOTTED_CIRCLE) {
return;
}
// Note: This loop is extra overhead, but should not be measurable.
// TODO Use a buffer scratch flag to remove the loop.
let has_broken_syllables = buffer.info_slice().iter()
.any(|info| info.syllable() & 0x0F == SyllableType::BrokenCluster as u8);
if!has_broken_syllables {
return;
}
let dottedcircle_glyph = match face.glyph_index(0x25CC) {
Some(g) => g.0 as u32,
None => return,
};
let mut dottedcircle = GlyphInfo {
glyph_id: dottedcircle_glyph,
..GlyphInfo::default()
};
dottedcircle.set_use_category(super::universal_table::get_category(0x25CC));
buffer.clear_output();
buffer.idx = 0;
let mut last_syllable = 0;
while buffer.idx < buffer.len {
let syllable = buffer.cur(0).syllable();
let syllable_type = syllable & 0x0F;
if last_syllable!= syllable && syllable_type == SyllableType::BrokenCluster as u8 {
last_syllable = syllable;
let mut ginfo = dottedcircle;
ginfo.cluster = buffer.cur(0).cluster;
ginfo.mask = buffer.cur(0).mask;
ginfo.set_syllable(buffer.cur(0).syllable());
// Insert dottedcircle after possible Repha.
while buffer.idx < buffer.len &&
last_syllable == buffer.cur(0).syllable() &&
buffer.cur(0).use_category() == category::R
{
buffer.next_glyph();
}
buffer.output_info(ginfo);
} else {
buffer.next_glyph();
}
}
buffer.swap_buffers();
}
const fn category_flag(c: Category) -> u32 {
rb_flag(c as u32)
}
const fn category_flag64(c: Category) -> u64 {
rb_flag64(c as u32)
}
const BASE_FLAGS: u64 =
category_flag64(category::FM) |
category_flag64(category::FABV) |
category_flag64(category::FBLW) |
category_flag64(category::FPST) |
category_flag64(category::MABV) |
category_flag64(category::MBLW) |
category_flag64(category::MPST) |
category_flag64(category::MPRE) |
category_flag64(category::VABV) |
category_flag64(category::VBLW) |
category_flag64(category::VPST) |
category_flag64(category::VPRE) |
category_flag64(category::VMABV) |
category_flag64(category::VMBLW) |
category_flag64(category::VMPST) |
category_flag64(category::VMPRE);
fn reorder_syllable(start: usize, end: usize, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let syllable_type = (buffer.info[start].syllable() & 0x0F) as u32;
// Only a few syllable types need reordering.
if (rb_flag_unsafe(syllable_type) &
(rb_flag(SyllableType::ViramaTerminatedCluster as u32) |
rb_flag(SyllableType::SakotTerminatedCluster as u32) |
rb_flag(SyllableType::StandardCluster as u32) |
rb_flag(SyllableType::BrokenCluster as u32) |
0)) == 0
{
return;
}
// Move things forward.
if buffer.info[start].use_category() == category::R && end - start > 1 {
// Got a repha. Reorder it towards the end, but before the first post-base glyph.
for i in start+1..end {
let is_post_base_glyph =
(rb_flag64_unsafe(buffer.info[i].use_category() as u32) & BASE_FLAGS)!= 0 ||
buffer.info[i].is_halant_use();
if is_post_base_glyph || i == end - 1 {
// If we hit a post-base glyph, move before it; otherwise move to the
// end. Shift things in between backward.
let mut i = i;
if is_post_base_glyph {
i -= 1;
}
buffer.merge_clusters(start, i + 1);
let t = buffer.info[start];
for k in 0..i-start {
buffer.info[k + start] = buffer.info[k + start + 1];
}
buffer.info[i] = t;
break;
}
}
}
// Move things back.
let mut j = start;
for i in start..end {
let flag = rb_flag_unsafe(buffer.info[i].use_category() as u32);
if buffer.info[i].is_halant_use() {
// If we hit a halant, move after it; otherwise move to the beginning, and
// shift things in between forward.
j = i + 1;
} else if (flag & (category_flag(category::VPRE) | category_flag(category::VMPRE)))!= 0 &&
buffer.info[i].lig_comp() == 0 && j < i
{
// Only move the first component of a MultipleSubst.
buffer.merge_clusters(j, i + 1);
let t = buffer.info[i];
for k in (0..i-j).rev() {
buffer.info[k + j + 1] = buffer.info[k + j];
}
buffer.info[j] = t;
}
}
}
fn record_pref(_: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted pref as VPre, as they behave the same way.
for i in start..end {
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::VPRE);
break;
}
}
start = end;
end = buffer.next_syllable(start);
}
}
fn has_arabic_joining(script: Script) -> bool {
// List of scripts that have data in arabic-table.
match script {
// Unicode-1.1 additions.
script::ARABIC |
// Unicode-3.0 additions.
script::MONGOLIAN |
script::SYRIAC |
// Unicode-5.0 additions.
script::NKO |
script::PHAGS_PA |
// Unicode-6.0 additions.
script::MANDAIC |
// Unicode-7.0 additions.
script::MANICHAEAN |
script::PSALTER_PAHLAVI |
// Unicode-9.0 additions.
script::ADLAM => true,
_ => false,
}
}
fn preprocess_text(_: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::vowel_constraints::preprocess_text_vowel_constraints(buffer);
}
fn compose(_: &ShapeNormalizeContext, a: char, b: char) -> Option<char> {
// Avoid recomposing split matras.
if a.general_category().is_mark() {
return None;
}
crate::unicode::compose(a, b)
}
fn setup_masks(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
// Do this before allocating use_category().
if let Some(ref arabic_plan) = universal_plan.arabic_plan {
super::arabic::setup_masks_inner(arabic_plan, plan.script, buffer);
}
// We cannot setup masks here. We save information about characters
// and setup masks later on in a pause-callback.
for info in buffer.info_slice_mut() {
info.set_use_category(super::universal_table::get_category(info.glyph_id));
}
}
| {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted repha as USE_R.
for i in start..end {
if buffer.info[i].mask & mask == 0 {
break;
}
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::R);
break; | identifier_body |
universal.rs | use crate::{script, Tag, Face, GlyphInfo, Mask, Script};
use crate::buffer::{Buffer, BufferFlags};
use crate::ot::{feature, FeatureFlags};
use crate::plan::{ShapePlan, ShapePlanner};
use crate::unicode::{CharExt, GeneralCategoryExt};
use super::*;
use super::arabic::ArabicShapePlan;
pub const UNIVERSAL_SHAPER: ComplexShaper = ComplexShaper {
collect_features: Some(collect_features),
override_features: None,
create_data: Some(|plan| Box::new(UniversalShapePlan::new(plan))),
preprocess_text: Some(preprocess_text),
postprocess_glyphs: None,
normalization_mode: Some(ShapeNormalizationMode::ComposedDiacriticsNoShortCircuit),
decompose: None,
compose: Some(compose),
setup_masks: Some(setup_masks),
gpos_tag: None,
reorder_marks: None,
zero_width_marks: Some(ZeroWidthMarksMode::ByGdefEarly),
fallback_position: false,
};
pub type Category = u8;
pub mod category {
pub const O: u8 = 0; // OTHER
pub const B: u8 = 1; // BASE
pub const IND: u8 = 3; // BASE_IND
pub const N: u8 = 4; // BASE_NUM
pub const GB: u8 = 5; // BASE_OTHER
pub const CGJ: u8 = 6; // CGJ
// pub const F: u8 = 7; // CONS_FINAL
pub const FM: u8 = 8; // CONS_FINAL_MOD
// pub const M: u8 = 9; // CONS_MED
// pub const CM: u8 = 10; // CONS_MOD
pub const SUB: u8 = 11; // CONS_SUB
pub const H: u8 = 12; // HALANT
pub const HN: u8 = 13; // HALANT_NUM
pub const ZWNJ: u8 = 14; // Zero width non-joiner
pub const ZWJ: u8 = 15; // Zero width joiner
pub const WJ: u8 = 16; // Word joiner
// pub const RSV: u8 = 17; // Reserved characters
pub const R: u8 = 18; // REPHA
pub const S: u8 = 19; // SYM
// pub const SM: u8 = 20; // SYM_MOD
pub const VS: u8 = 21; // VARIATION_SELECTOR
// pub const V: u8 = 36; // VOWEL
// pub const VM: u8 = 40; // VOWEL_MOD
pub const CS: u8 = 43; // CONS_WITH_STACKER
// https://github.com/harfbuzz/harfbuzz/issues/1102
pub const HVM: u8 = 44; // HALANT_OR_VOWEL_MODIFIER
pub const SK: u8 = 48; // SAKOT
pub const FABV: u8 = 24; // CONS_FINAL_ABOVE
pub const FBLW: u8 = 25; // CONS_FINAL_BELOW
pub const FPST: u8 = 26; // CONS_FINAL_POST
pub const MABV: u8 = 27; // CONS_MED_ABOVE
pub const MBLW: u8 = 28; // CONS_MED_BELOW
pub const MPST: u8 = 29; // CONS_MED_POST
pub const MPRE: u8 = 30; // CONS_MED_PRE
pub const CMABV: u8 = 31; // CONS_MOD_ABOVE
pub const CMBLW: u8 = 32; // CONS_MOD_BELOW
pub const VABV: u8 = 33; // VOWEL_ABOVE / VOWEL_ABOVE_BELOW / VOWEL_ABOVE_BELOW_POST / VOWEL_ABOVE_POST
pub const VBLW: u8 = 34; // VOWEL_BELOW / VOWEL_BELOW_POST
pub const VPST: u8 = 35; // VOWEL_POST UIPC = Right
pub const VPRE: u8 = 22; // VOWEL_PRE / VOWEL_PRE_ABOVE / VOWEL_PRE_ABOVE_POST / VOWEL_PRE_POST
pub const VMABV: u8 = 37; // VOWEL_MOD_ABOVE
pub const VMBLW: u8 = 38; // VOWEL_MOD_BELOW
pub const VMPST: u8 = 39; // VOWEL_MOD_POST
pub const VMPRE: u8 = 23; // VOWEL_MOD_PRE
pub const SMABV: u8 = 41; // SYM_MOD_ABOVE
pub const SMBLW: u8 = 42; // SYM_MOD_BELOW
pub const FMABV: u8 = 45; // CONS_FINAL_MOD UIPC = Top
pub const FMBLW: u8 = 46; // CONS_FINAL_MOD UIPC = Bottom
pub const FMPST: u8 = 47; // CONS_FINAL_MOD UIPC = Not_Applicable
}
// These features are applied all at once, before reordering.
const BASIC_FEATURES: &[Tag] = &[
feature::RAKAR_FORMS,
feature::ABOVE_BASE_FORMS,
feature::BELOW_BASE_FORMS,
feature::HALF_FORMS,
feature::POST_BASE_FORMS,
feature::VATTU_VARIANTS,
feature::CONJUNCT_FORMS,
];
const TOPOGRAPHICAL_FEATURES: &[Tag] = &[
feature::ISOLATED_FORMS,
feature::INITIAL_FORMS,
feature::MEDIAL_FORMS_1,
feature::TERMINAL_FORMS_1,
];
// Same order as use_topographical_features.
#[derive(Clone, Copy, PartialEq)]
enum JoiningForm {
Isolated = 0,
Initial,
Medial,
Terminal,
}
// These features are applied all at once, after reordering and clearing syllables.
const OTHER_FEATURES: &[Tag] = &[
feature::ABOVE_BASE_SUBSTITUTIONS,
feature::BELOW_BASE_SUBSTITUTIONS,
feature::HALANT_FORMS,
feature::PRE_BASE_SUBSTITUTIONS,
feature::POST_BASE_SUBSTITUTIONS,
];
impl GlyphInfo {
pub(crate) fn use_category(&self) -> Category {
let v: &[u8; 4] = bytemuck::cast_ref(&self.var2);
v[2]
}
fn set_use_category(&mut self, c: Category) {
let v: &mut [u8; 4] = bytemuck::cast_mut(&mut self.var2);
v[2] = c;
}
fn is_halant_use(&self) -> bool {
matches!(self.use_category(), category::H | category::HVM) &&!self.is_ligated()
}
}
struct UniversalShapePlan {
rphf_mask: Mask,
arabic_plan: Option<ArabicShapePlan>,
}
impl UniversalShapePlan {
fn new(plan: &ShapePlan) -> UniversalShapePlan {
let mut arabic_plan = None;
if plan.script.map_or(false, has_arabic_joining) {
arabic_plan = Some(super::arabic::ArabicShapePlan::new(plan));
}
UniversalShapePlan {
rphf_mask: plan.ot_map.one_mask(feature::REPH_FORMS),
arabic_plan,
}
}
}
fn collect_features(planner: &mut ShapePlanner) {
// Do this before any lookups have been applied.
planner.ot_map.add_gsub_pause(Some(setup_syllables));
// Default glyph pre-processing group
planner.ot_map.enable_feature(feature::LOCALIZED_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::GLYPH_COMPOSITION_DECOMPOSITION, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::NUKTA_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::AKHANDS, FeatureFlags::MANUAL_ZWJ, 1);
// Reordering group
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.add_feature(feature::REPH_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_rphf));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.enable_feature(feature::PRE_BASE_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_pref));
// Orthographic unit shaping group
for feature in BASIC_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::MANUAL_ZWJ, 1);
}
planner.ot_map.add_gsub_pause(Some(reorder));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_syllables));
// Topographical features
for feature in TOPOGRAPHICAL_FEATURES {
planner.ot_map.add_feature(*feature, FeatureFlags::empty(), 1);
}
planner.ot_map.add_gsub_pause(None);
// Standard typographic presentation
for feature in OTHER_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::empty(), 1);
}
}
fn setup_syllables(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::universal_machine::find_syllables(buffer);
foreach_syllable!(buffer, start, end, {
buffer.unsafe_to_break(start, end);
});
setup_rphf_mask(plan, buffer);
setup_topographical_masks(plan, buffer);
}
fn setup_rphf_mask(plan: &ShapePlan, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let limit = if buffer.info[start].use_category() == category::R {
1
} else {
core::cmp::min(3, end - start)
};
for i in start..start+limit {
buffer.info[i].mask |= mask;
}
start = end;
end = buffer.next_syllable(start);
}
}
fn setup_topographical_masks(plan: &ShapePlan, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let mut masks = [0; 4];
let mut all_masks = 0;
for i in 0..4 {
masks[i] = plan.ot_map.one_mask(TOPOGRAPHICAL_FEATURES[i]);
if masks[i] == plan.ot_map.global_mask() {
masks[i] = 0;
}
all_masks |= masks[i];
}
if all_masks == 0 {
return;
}
let other_masks =!all_masks;
let mut last_start = 0;
let mut last_form = None;
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let syllable = buffer.info[start].syllable() & 0x0F;
if syllable == SyllableType::IndependentCluster as u8 ||
syllable == SyllableType::SymbolCluster as u8 ||
syllable == SyllableType::NonCluster as u8
{
last_form = None;
} else {
let join = last_form == Some(JoiningForm::Terminal) || last_form == Some(JoiningForm::Isolated);
if join {
// Fixup previous syllable's form.
let form = if last_form == Some(JoiningForm::Terminal) {
JoiningForm::Medial
} else {
JoiningForm::Initial
};
for i in last_start..start {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
// Form for this syllable.
let form = if join { JoiningForm::Terminal } else { JoiningForm::Isolated };
last_form = Some(form);
for i in start..end {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
last_start = start;
start = end;
end = buffer.next_syllable(start);
}
}
fn record_rphf(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted repha as USE_R.
for i in start..end {
if buffer.info[i].mask & mask == 0 {
break;
}
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::R);
break;
}
}
start = end;
end = buffer.next_syllable(start);
}
}
fn reorder(_: &ShapePlan, face: &Face, buffer: &mut Buffer) {
insert_dotted_circles(face, buffer);
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
reorder_syllable(start, end, buffer);
start = end;
end = buffer.next_syllable(start);
}
}
fn insert_dotted_circles(face: &Face, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
if buffer.flags.contains(BufferFlags::DO_NOT_INSERT_DOTTED_CIRCLE) {
return;
}
// Note: This loop is extra overhead, but should not be measurable.
// TODO Use a buffer scratch flag to remove the loop.
let has_broken_syllables = buffer.info_slice().iter()
.any(|info| info.syllable() & 0x0F == SyllableType::BrokenCluster as u8);
if!has_broken_syllables {
return;
}
let dottedcircle_glyph = match face.glyph_index(0x25CC) {
Some(g) => g.0 as u32,
None => return,
};
let mut dottedcircle = GlyphInfo {
glyph_id: dottedcircle_glyph,
..GlyphInfo::default()
};
dottedcircle.set_use_category(super::universal_table::get_category(0x25CC));
buffer.clear_output();
buffer.idx = 0;
let mut last_syllable = 0;
while buffer.idx < buffer.len {
let syllable = buffer.cur(0).syllable();
let syllable_type = syllable & 0x0F;
if last_syllable!= syllable && syllable_type == SyllableType::BrokenCluster as u8 {
last_syllable = syllable;
let mut ginfo = dottedcircle;
ginfo.cluster = buffer.cur(0).cluster;
ginfo.mask = buffer.cur(0).mask;
ginfo.set_syllable(buffer.cur(0).syllable());
// Insert dottedcircle after possible Repha.
while buffer.idx < buffer.len &&
last_syllable == buffer.cur(0).syllable() &&
buffer.cur(0).use_category() == category::R
{
buffer.next_glyph();
}
buffer.output_info(ginfo); | buffer.next_glyph();
}
}
buffer.swap_buffers();
}
const fn category_flag(c: Category) -> u32 {
rb_flag(c as u32)
}
const fn category_flag64(c: Category) -> u64 {
rb_flag64(c as u32)
}
const BASE_FLAGS: u64 =
category_flag64(category::FM) |
category_flag64(category::FABV) |
category_flag64(category::FBLW) |
category_flag64(category::FPST) |
category_flag64(category::MABV) |
category_flag64(category::MBLW) |
category_flag64(category::MPST) |
category_flag64(category::MPRE) |
category_flag64(category::VABV) |
category_flag64(category::VBLW) |
category_flag64(category::VPST) |
category_flag64(category::VPRE) |
category_flag64(category::VMABV) |
category_flag64(category::VMBLW) |
category_flag64(category::VMPST) |
category_flag64(category::VMPRE);
fn reorder_syllable(start: usize, end: usize, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let syllable_type = (buffer.info[start].syllable() & 0x0F) as u32;
// Only a few syllable types need reordering.
if (rb_flag_unsafe(syllable_type) &
(rb_flag(SyllableType::ViramaTerminatedCluster as u32) |
rb_flag(SyllableType::SakotTerminatedCluster as u32) |
rb_flag(SyllableType::StandardCluster as u32) |
rb_flag(SyllableType::BrokenCluster as u32) |
0)) == 0
{
return;
}
// Move things forward.
if buffer.info[start].use_category() == category::R && end - start > 1 {
// Got a repha. Reorder it towards the end, but before the first post-base glyph.
for i in start+1..end {
let is_post_base_glyph =
(rb_flag64_unsafe(buffer.info[i].use_category() as u32) & BASE_FLAGS)!= 0 ||
buffer.info[i].is_halant_use();
if is_post_base_glyph || i == end - 1 {
// If we hit a post-base glyph, move before it; otherwise move to the
// end. Shift things in between backward.
let mut i = i;
if is_post_base_glyph {
i -= 1;
}
buffer.merge_clusters(start, i + 1);
let t = buffer.info[start];
for k in 0..i-start {
buffer.info[k + start] = buffer.info[k + start + 1];
}
buffer.info[i] = t;
break;
}
}
}
// Move things back.
let mut j = start;
for i in start..end {
let flag = rb_flag_unsafe(buffer.info[i].use_category() as u32);
if buffer.info[i].is_halant_use() {
// If we hit a halant, move after it; otherwise move to the beginning, and
// shift things in between forward.
j = i + 1;
} else if (flag & (category_flag(category::VPRE) | category_flag(category::VMPRE)))!= 0 &&
buffer.info[i].lig_comp() == 0 && j < i
{
// Only move the first component of a MultipleSubst.
buffer.merge_clusters(j, i + 1);
let t = buffer.info[i];
for k in (0..i-j).rev() {
buffer.info[k + j + 1] = buffer.info[k + j];
}
buffer.info[j] = t;
}
}
}
fn record_pref(_: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted pref as VPre, as they behave the same way.
for i in start..end {
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::VPRE);
break;
}
}
start = end;
end = buffer.next_syllable(start);
}
}
fn has_arabic_joining(script: Script) -> bool {
// List of scripts that have data in arabic-table.
match script {
// Unicode-1.1 additions.
script::ARABIC |
// Unicode-3.0 additions.
script::MONGOLIAN |
script::SYRIAC |
// Unicode-5.0 additions.
script::NKO |
script::PHAGS_PA |
// Unicode-6.0 additions.
script::MANDAIC |
// Unicode-7.0 additions.
script::MANICHAEAN |
script::PSALTER_PAHLAVI |
// Unicode-9.0 additions.
script::ADLAM => true,
_ => false,
}
}
fn preprocess_text(_: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::vowel_constraints::preprocess_text_vowel_constraints(buffer);
}
fn compose(_: &ShapeNormalizeContext, a: char, b: char) -> Option<char> {
// Avoid recomposing split matras.
if a.general_category().is_mark() {
return None;
}
crate::unicode::compose(a, b)
}
fn setup_masks(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
// Do this before allocating use_category().
if let Some(ref arabic_plan) = universal_plan.arabic_plan {
super::arabic::setup_masks_inner(arabic_plan, plan.script, buffer);
}
// We cannot setup masks here. We save information about characters
// and setup masks later on in a pause-callback.
for info in buffer.info_slice_mut() {
info.set_use_category(super::universal_table::get_category(info.glyph_id));
}
} | } else { | random_line_split |
universal.rs | use crate::{script, Tag, Face, GlyphInfo, Mask, Script};
use crate::buffer::{Buffer, BufferFlags};
use crate::ot::{feature, FeatureFlags};
use crate::plan::{ShapePlan, ShapePlanner};
use crate::unicode::{CharExt, GeneralCategoryExt};
use super::*;
use super::arabic::ArabicShapePlan;
pub const UNIVERSAL_SHAPER: ComplexShaper = ComplexShaper {
collect_features: Some(collect_features),
override_features: None,
create_data: Some(|plan| Box::new(UniversalShapePlan::new(plan))),
preprocess_text: Some(preprocess_text),
postprocess_glyphs: None,
normalization_mode: Some(ShapeNormalizationMode::ComposedDiacriticsNoShortCircuit),
decompose: None,
compose: Some(compose),
setup_masks: Some(setup_masks),
gpos_tag: None,
reorder_marks: None,
zero_width_marks: Some(ZeroWidthMarksMode::ByGdefEarly),
fallback_position: false,
};
/// A USE character category (one of the `category::*` constants).
pub type Category = u8;

/// Universal Shaping Engine character categories, as produced by
/// `universal_table::get_category`. Commented-out constants are
/// categories this implementation does not use directly.
pub mod category {
    pub const O: u8 = 0; // OTHER

    pub const B: u8 = 1; // BASE

    pub const IND: u8 = 3; // BASE_IND

    pub const N: u8 = 4; // BASE_NUM

    pub const GB: u8 = 5; // BASE_OTHER

    pub const CGJ: u8 = 6; // CGJ

    // pub const F: u8 = 7; // CONS_FINAL

    pub const FM: u8 = 8; // CONS_FINAL_MOD

    // pub const M: u8 = 9; // CONS_MED

    // pub const CM: u8 = 10; // CONS_MOD

    pub const SUB: u8 = 11; // CONS_SUB

    pub const H: u8 = 12; // HALANT

    pub const HN: u8 = 13; // HALANT_NUM

    pub const ZWNJ: u8 = 14; // Zero width non-joiner

    pub const ZWJ: u8 = 15; // Zero width joiner

    pub const WJ: u8 = 16; // Word joiner

    // pub const RSV: u8 = 17; // Reserved characters

    pub const R: u8 = 18; // REPHA

    pub const S: u8 = 19; // SYM

    // pub const SM: u8 = 20; // SYM_MOD

    pub const VS: u8 = 21; // VARIATION_SELECTOR

    // pub const V: u8 = 36; // VOWEL

    // pub const VM: u8 = 40; // VOWEL_MOD

    pub const CS: u8 = 43; // CONS_WITH_STACKER

    // https://github.com/harfbuzz/harfbuzz/issues/1102
    pub const HVM: u8 = 44; // HALANT_OR_VOWEL_MODIFIER

    pub const SK: u8 = 48; // SAKOT

    pub const FABV: u8 = 24; // CONS_FINAL_ABOVE

    pub const FBLW: u8 = 25; // CONS_FINAL_BELOW

    pub const FPST: u8 = 26; // CONS_FINAL_POST

    pub const MABV: u8 = 27; // CONS_MED_ABOVE

    pub const MBLW: u8 = 28; // CONS_MED_BELOW

    pub const MPST: u8 = 29; // CONS_MED_POST

    pub const MPRE: u8 = 30; // CONS_MED_PRE

    pub const CMABV: u8 = 31; // CONS_MOD_ABOVE

    pub const CMBLW: u8 = 32; // CONS_MOD_BELOW

    pub const VABV: u8 = 33; // VOWEL_ABOVE / VOWEL_ABOVE_BELOW / VOWEL_ABOVE_BELOW_POST / VOWEL_ABOVE_POST

    pub const VBLW: u8 = 34; // VOWEL_BELOW / VOWEL_BELOW_POST

    pub const VPST: u8 = 35; // VOWEL_POST UIPC = Right

    pub const VPRE: u8 = 22; // VOWEL_PRE / VOWEL_PRE_ABOVE / VOWEL_PRE_ABOVE_POST / VOWEL_PRE_POST

    pub const VMABV: u8 = 37; // VOWEL_MOD_ABOVE

    pub const VMBLW: u8 = 38; // VOWEL_MOD_BELOW

    pub const VMPST: u8 = 39; // VOWEL_MOD_POST

    pub const VMPRE: u8 = 23; // VOWEL_MOD_PRE

    pub const SMABV: u8 = 41; // SYM_MOD_ABOVE

    pub const SMBLW: u8 = 42; // SYM_MOD_BELOW

    pub const FMABV: u8 = 45; // CONS_FINAL_MOD UIPC = Top

    pub const FMBLW: u8 = 46; // CONS_FINAL_MOD UIPC = Bottom

    pub const FMPST: u8 = 47; // CONS_FINAL_MOD UIPC = Not_Applicable
}
// These features are applied all at once, before reordering.
const BASIC_FEATURES: &[Tag] = &[
    feature::RAKAR_FORMS,
    feature::ABOVE_BASE_FORMS,
    feature::BELOW_BASE_FORMS,
    feature::HALF_FORMS,
    feature::POST_BASE_FORMS,
    feature::VATTU_VARIANTS,
    feature::CONJUNCT_FORMS,
];

// Joining-form features, toggled per syllable via masks
// (see `setup_topographical_masks`). Same order as `JoiningForm`.
const TOPOGRAPHICAL_FEATURES: &[Tag] = &[
    feature::ISOLATED_FORMS,
    feature::INITIAL_FORMS,
    feature::MEDIAL_FORMS_1,
    feature::TERMINAL_FORMS_1,
];
// Same order as use_topographical_features.
/// Joining form of a syllable; the discriminant indexes into
/// `TOPOGRAPHICAL_FEATURES` and the mask array built from it.
#[derive(Clone, Copy, PartialEq)]
enum JoiningForm {
    Isolated = 0,
    Initial,
    Medial,
    Terminal,
}
// These features are applied all at once, after reordering and clearing syllables.
const OTHER_FEATURES: &[Tag] = &[
    feature::ABOVE_BASE_SUBSTITUTIONS,
    feature::BELOW_BASE_SUBSTITUTIONS,
    feature::HALANT_FORMS,
    feature::PRE_BASE_SUBSTITUTIONS,
    feature::POST_BASE_SUBSTITUTIONS,
];
impl GlyphInfo {
    /// The glyph's USE category, stored in byte 2 of `var2`.
    pub(crate) fn use_category(&self) -> Category {
        let v: &[u8; 4] = bytemuck::cast_ref(&self.var2);
        v[2]
    }

    /// Stores the USE category in byte 2 of `var2`.
    fn set_use_category(&mut self, c: Category) {
        let v: &mut [u8; 4] = bytemuck::cast_mut(&mut self.var2);
        v[2] = c;
    }

    /// Whether this glyph acts as a halant for reordering purposes:
    /// halant-like category (H or HVM) that was not ligated away.
    fn is_halant_use(&self) -> bool {
        matches!(self.use_category(), category::H | category::HVM) && !self.is_ligated()
    }
}
/// Per-plan data for the Universal Shaping Engine.
struct UniversalShapePlan {
    /// Mask of the `rphf` (reph forms) feature; 0 when absent.
    rphf_mask: Mask,
    /// Nested Arabic plan for scripts with Arabic-style joining.
    arabic_plan: Option<ArabicShapePlan>,
}
impl UniversalShapePlan {
    /// Builds the USE-specific plan data: the `rphf` feature mask and,
    /// for scripts with Arabic-style joining, a nested Arabic plan.
    fn new(plan: &ShapePlan) -> UniversalShapePlan {
        let arabic_plan = if plan.script.map_or(false, has_arabic_joining) {
            Some(super::arabic::ArabicShapePlan::new(plan))
        } else {
            None
        };

        UniversalShapePlan {
            rphf_mask: plan.ot_map.one_mask(feature::REPH_FORMS),
            arabic_plan,
        }
    }
}
/// Registers the USE feature pipeline. The order of features and GSUB
/// pauses is significant: syllable setup runs first, then the reordering
/// features with their recording pauses, then basic shaping, reordering,
/// topographical joining forms, and finally presentation features.
fn collect_features(planner: &mut ShapePlanner) {
    // Do this before any lookups have been applied.
    planner.ot_map.add_gsub_pause(Some(setup_syllables));

    // Default glyph pre-processing group
    planner.ot_map.enable_feature(feature::LOCALIZED_FORMS, FeatureFlags::empty(), 1);
    planner.ot_map.enable_feature(feature::GLYPH_COMPOSITION_DECOMPOSITION, FeatureFlags::empty(), 1);
    planner.ot_map.enable_feature(feature::NUKTA_FORMS, FeatureFlags::empty(), 1);
    planner.ot_map.enable_feature(feature::AKHANDS, FeatureFlags::MANUAL_ZWJ, 1);

    // Reordering group
    planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
    planner.ot_map.add_feature(feature::REPH_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
    planner.ot_map.add_gsub_pause(Some(record_rphf));
    planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
    planner.ot_map.enable_feature(feature::PRE_BASE_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
    planner.ot_map.add_gsub_pause(Some(record_pref));

    // Orthographic unit shaping group
    for feature in BASIC_FEATURES {
        planner.ot_map.enable_feature(*feature, FeatureFlags::MANUAL_ZWJ, 1);
    }

    planner.ot_map.add_gsub_pause(Some(reorder));
    planner.ot_map.add_gsub_pause(Some(crate::ot::clear_syllables));

    // Topographical features
    // Note: added (not enabled); they are toggled per syllable via masks
    // in `setup_topographical_masks`.
    for feature in TOPOGRAPHICAL_FEATURES {
        planner.ot_map.add_feature(*feature, FeatureFlags::empty(), 1);
    }
    planner.ot_map.add_gsub_pause(None);

    // Standard typographic presentation
    for feature in OTHER_FEATURES {
        planner.ot_map.enable_feature(*feature, FeatureFlags::empty(), 1);
    }
}
/// GSUB pause: runs the USE syllable state machine over the buffer,
/// marks each syllable as unbreakable, and prepares the `rphf` and
/// topographical feature masks for later stages.
fn setup_syllables(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
    super::universal_machine::find_syllables(buffer);

    foreach_syllable!(buffer, start, end, {
        buffer.unsafe_to_break(start, end);
    });

    setup_rphf_mask(plan, buffer);
    setup_topographical_masks(plan, buffer);
}
/// Applies the `rphf` feature mask to the leading characters of every
/// syllable: just the repha itself when the syllable starts with one,
/// otherwise the first up-to-three characters.
fn setup_rphf_mask(plan: &ShapePlan, buffer: &mut Buffer) {
    let mask = plan.data::<UniversalShapePlan>().rphf_mask;
    if mask == 0 {
        return;
    }

    let mut start = 0;
    let mut end = buffer.next_syllable(0);
    while start < buffer.len {
        let limit = match buffer.info[start].use_category() {
            category::R => 1,
            _ => core::cmp::min(3, end - start),
        };

        for info in &mut buffer.info[start..start + limit] {
            info.mask |= mask;
        }

        start = end;
        end = buffer.next_syllable(start);
    }
}
/// Assigns joining-form masks (isolated/initial/medial/terminal) per
/// syllable, so that the topographical features apply contextually
/// across adjacent syllables.
fn setup_topographical_masks(plan: &ShapePlan, buffer: &mut Buffer) {
    use super::universal_machine::SyllableType;

    let mut masks = [0; 4];
    let mut all_masks = 0;
    for i in 0..4 {
        masks[i] = plan.ot_map.one_mask(TOPOGRAPHICAL_FEATURES[i]);
        // A feature mapped to the global mask is always on and cannot be
        // toggled per glyph; treat it as absent here.
        if masks[i] == plan.ot_map.global_mask() {
            masks[i] = 0;
        }

        all_masks |= masks[i];
    }

    // None of the topographical features are present; nothing to toggle.
    if all_masks == 0 {
        return;
    }

    let other_masks = !all_masks;

    let mut last_start = 0;
    let mut last_form = None;
    let mut start = 0;
    let mut end = buffer.next_syllable(0);
    while start < buffer.len {
        let syllable = buffer.info[start].syllable() & 0x0F;
        if syllable == SyllableType::IndependentCluster as u8 ||
            syllable == SyllableType::SymbolCluster as u8 ||
            syllable == SyllableType::NonCluster as u8
        {
            // These cluster types break the joining chain.
            last_form = None;
        } else {
            let join = last_form == Some(JoiningForm::Terminal) || last_form == Some(JoiningForm::Isolated);

            if join {
                // Fixup previous syllable's form: it is no longer last in
                // its run, so Terminal becomes Medial and Isolated becomes
                // Initial.
                let form = if last_form == Some(JoiningForm::Terminal) {
                    JoiningForm::Medial
                } else {
                    JoiningForm::Initial
                };

                for i in last_start..start {
                    buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
                }
            }

            // Form for this syllable.
            let form = if join { JoiningForm::Terminal } else { JoiningForm::Isolated };
            last_form = Some(form);
            for i in start..end {
                buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
            }
        }

        last_start = start;
        start = end;
        end = buffer.next_syllable(start);
    }
}
/// GSUB pause: after the `rphf` feature has been applied, re-tags the
/// glyph it actually substituted as a repha (category R) so reordering
/// can recognize it.
fn record_rphf(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
    let universal_plan = plan.data::<UniversalShapePlan>();

    let mask = universal_plan.rphf_mask;
    if mask == 0 {
        return;
    }

    let mut start = 0;
    let mut end = buffer.next_syllable(0);
    while start < buffer.len {
        // Mark a substituted repha as USE_R.
        for i in start..end {
            // Only the leading run of glyphs carries the rphf mask
            // (see `setup_rphf_mask`); stop as soon as it ends.
            if buffer.info[i].mask & mask == 0 {
                break;
            }

            if buffer.info[i].is_substituted() {
                buffer.info[i].set_use_category(category::R);
                break;
            }
        }

        start = end;
        end = buffer.next_syllable(start);
    }
}
/// GSUB pause: inserts dotted circles into broken clusters, then
/// reorders every syllable in the buffer into visual order.
fn reorder(_: &ShapePlan, face: &Face, buffer: &mut Buffer) {
    insert_dotted_circles(face, buffer);

    let mut start = 0;
    let mut end = buffer.next_syllable(0);
    while start < buffer.len {
        reorder_syllable(start, end, buffer);
        start = end;
        end = buffer.next_syllable(start);
    }
}
fn | (face: &Face, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
if buffer.flags.contains(BufferFlags::DO_NOT_INSERT_DOTTED_CIRCLE) {
return;
}
// Note: This loop is extra overhead, but should not be measurable.
// TODO Use a buffer scratch flag to remove the loop.
let has_broken_syllables = buffer.info_slice().iter()
.any(|info| info.syllable() & 0x0F == SyllableType::BrokenCluster as u8);
if!has_broken_syllables {
return;
}
let dottedcircle_glyph = match face.glyph_index(0x25CC) {
Some(g) => g.0 as u32,
None => return,
};
let mut dottedcircle = GlyphInfo {
glyph_id: dottedcircle_glyph,
..GlyphInfo::default()
};
dottedcircle.set_use_category(super::universal_table::get_category(0x25CC));
buffer.clear_output();
buffer.idx = 0;
let mut last_syllable = 0;
while buffer.idx < buffer.len {
let syllable = buffer.cur(0).syllable();
let syllable_type = syllable & 0x0F;
if last_syllable!= syllable && syllable_type == SyllableType::BrokenCluster as u8 {
last_syllable = syllable;
let mut ginfo = dottedcircle;
ginfo.cluster = buffer.cur(0).cluster;
ginfo.mask = buffer.cur(0).mask;
ginfo.set_syllable(buffer.cur(0).syllable());
// Insert dottedcircle after possible Repha.
while buffer.idx < buffer.len &&
last_syllable == buffer.cur(0).syllable() &&
buffer.cur(0).use_category() == category::R
{
buffer.next_glyph();
}
buffer.output_info(ginfo);
} else {
buffer.next_glyph();
}
}
buffer.swap_buffers();
}
/// 32-bit flag for a USE category (valid for categories < 32).
const fn category_flag(c: Category) -> u32 {
    rb_flag(c as u32)
}

/// 64-bit flag for a USE category (categories go up to 48).
const fn category_flag64(c: Category) -> u64 {
    rb_flag64(c as u32)
}

/// Categories that may only appear after the base: finals, medials,
/// vowels, and their modifiers. Used to locate the first post-base
/// glyph when repositioning a repha.
const BASE_FLAGS: u64 =
    category_flag64(category::FM) |
    category_flag64(category::FABV) |
    category_flag64(category::FBLW) |
    category_flag64(category::FPST) |
    category_flag64(category::MABV) |
    category_flag64(category::MBLW) |
    category_flag64(category::MPST) |
    category_flag64(category::MPRE) |
    category_flag64(category::VABV) |
    category_flag64(category::VBLW) |
    category_flag64(category::VPST) |
    category_flag64(category::VPRE) |
    category_flag64(category::VMABV) |
    category_flag64(category::VMBLW) |
    category_flag64(category::VMPST) |
    category_flag64(category::VMPRE);
/// Reorders one syllable (`start..end`) into visual order: a leading
/// repha is moved towards the end (but before the first post-base
/// glyph), and pre-base vowels are pulled to the front (or to just
/// after the last halant).
fn reorder_syllable(start: usize, end: usize, buffer: &mut Buffer) {
    use super::universal_machine::SyllableType;

    let syllable_type = (buffer.info[start].syllable() & 0x0F) as u32;
    // Only a few syllable types need reordering.
    if (rb_flag_unsafe(syllable_type) &
        (rb_flag(SyllableType::ViramaTerminatedCluster as u32) |
            rb_flag(SyllableType::SakotTerminatedCluster as u32) |
            rb_flag(SyllableType::StandardCluster as u32) |
            rb_flag(SyllableType::BrokenCluster as u32) |
            0)) == 0
    {
        return;
    }

    // Move things forward.
    if buffer.info[start].use_category() == category::R && end - start > 1 {
        // Got a repha. Reorder it towards the end, but before the first post-base glyph.
        for i in start+1..end {
            let is_post_base_glyph =
                (rb_flag64_unsafe(buffer.info[i].use_category() as u32) & BASE_FLAGS) != 0 ||
                    buffer.info[i].is_halant_use();

            if is_post_base_glyph || i == end - 1 {
                // If we hit a post-base glyph, move before it; otherwise move to the
                // end. Shift things in between backward.
                let mut i = i;
                if is_post_base_glyph {
                    i -= 1;
                }

                buffer.merge_clusters(start, i + 1);
                // Rotate left: the repha at `start` goes to position `i`,
                // everything in between shifts back by one.
                let t = buffer.info[start];
                for k in 0..i-start {
                    buffer.info[k + start] = buffer.info[k + start + 1];
                }
                buffer.info[i] = t;

                break;
            }
        }
    }

    // Move things back.
    let mut j = start;
    for i in start..end {
        let flag = rb_flag_unsafe(buffer.info[i].use_category() as u32);
        if buffer.info[i].is_halant_use() {
            // If we hit a halant, move after it; otherwise move to the beginning, and
            // shift things in between forward.
            j = i + 1;
        } else if (flag & (category_flag(category::VPRE) | category_flag(category::VMPRE))) != 0 &&
            buffer.info[i].lig_comp() == 0 && j < i
        {
            // Only move the first component of a MultipleSubst.
            buffer.merge_clusters(j, i + 1);
            // Rotate right: the pre-base vowel at `i` goes to position `j`,
            // everything in between shifts forward by one.
            let t = buffer.info[i];
            for k in (0..i-j).rev() {
                buffer.info[k + j + 1] = buffer.info[k + j];
            }
            buffer.info[j] = t;
        }
    }
}
/// GSUB pause: after the `pref` feature has been applied, re-tags the
/// first substituted glyph of each syllable as a pre-base vowel, since
/// a matched `pref` behaves the same way during reordering.
fn record_pref(_: &ShapePlan, _: &Face, buffer: &mut Buffer) {
    let mut start = 0;
    let mut end = buffer.next_syllable(0);
    while start < buffer.len {
        // Mark a substituted pref as VPre, as they behave the same way.
        if let Some(info) = buffer.info[start..end]
            .iter_mut()
            .find(|info| info.is_substituted())
        {
            info.set_use_category(category::VPRE);
        }

        start = end;
        end = buffer.next_syllable(start);
    }
}
/// Whether `script` has entries in the Arabic joining table and thus
/// needs the nested Arabic shaping plan.
fn has_arabic_joining(script: Script) -> bool {
    matches!(
        script,
        // Unicode-1.1 additions.
        script::ARABIC
            // Unicode-3.0 additions.
            | script::MONGOLIAN
            | script::SYRIAC
            // Unicode-5.0 additions.
            | script::NKO
            | script::PHAGS_PA
            // Unicode-6.0 additions.
            | script::MANDAIC
            // Unicode-7.0 additions.
            | script::MANICHAEAN
            | script::PSALTER_PAHLAVI
            // Unicode-9.0 additions.
            | script::ADLAM
    )
}
/// `preprocess_text` hook: applies the shared vowel-constraint
/// preprocessing before normalization.
fn preprocess_text(_: &ShapePlan, _: &Face, buffer: &mut Buffer) {
    super::vowel_constraints::preprocess_text_vowel_constraints(buffer);
}
/// Custom composition hook: refuses to compose anything onto a mark,
/// which avoids recomposing split matras; otherwise falls back to the
/// standard Unicode composition.
fn compose(_: &ShapeNormalizeContext, a: char, b: char) -> Option<char> {
    if a.general_category().is_mark() {
        // Avoid recomposing split matras.
        None
    } else {
        crate::unicode::compose(a, b)
    }
}
/// `setup_masks` hook: runs the Arabic mask setup when the script has
/// Arabic-style joining, then records each character's USE category.
/// The actual USE masks are applied later in pause callbacks.
fn setup_masks(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
    let universal_plan = plan.data::<UniversalShapePlan>();

    // Do this before allocating use_category().
    if let Some(ref arabic_plan) = universal_plan.arabic_plan {
        super::arabic::setup_masks_inner(arabic_plan, plan.script, buffer);
    }

    // We cannot setup masks here. We save information about characters
    // and setup masks later on in a pause-callback.
    for info in buffer.info_slice_mut() {
        info.set_use_category(super::universal_table::get_category(info.glyph_id));
    }
}
| insert_dotted_circles | identifier_name |
source.rs | //! Utils for extracting, inspecting or transforming source code
#![allow(clippy::module_name_repetitions)]
use crate::line_span;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LintContext};
use rustc_span::hygiene;
use rustc_span::{BytePos, Pos, Span, SyntaxContext};
use std::borrow::Cow;
/// Like `snippet_block`, but add braces if the expr is not an `ExprKind::Block`.
/// Also takes an `Option<String>` which can be put inside the braces.
pub fn expr_block<'a, T: LintContext>(
    cx: &T,
    expr: &Expr<'_>,
    option: Option<String>,
    default: &'a str,
    indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
    let code = snippet_block(cx, expr.span, default, indent_relative_to);
    let string = option.unwrap_or_default();
    if expr.span.from_expansion() {
        // Macro-expanded expression: show the macro call site, braced.
        Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
    } else if let ExprKind::Block(_, _) = expr.kind {
        // Already a block: keep it and append `string` verbatim.
        Cow::Owned(format!("{}{}", code, string))
    } else if string.is_empty() {
        // Single expression, nothing extra: wrap on one line.
        Cow::Owned(format!("{{ {} }}", code))
    } else {
        // Single expression plus extra content: multi-line block.
        Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
    }
}
/// Returns a new Span that extends the original Span to the first non-whitespace char of the first
/// line.
///
/// ```rust,ignore
///        let x = ();
/// //             ^^
/// // will be converted to
///        let x = ();
/// //     ^^^^^^^^^^
/// ```
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
    match first_char_in_first_line(cx, span) {
        Some(first_char_pos) => span.with_lo(first_char_pos),
        None => span,
    }
}
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
let line_span = line_span(cx, span);
snippet_opt(cx, line_span).and_then(|snip| {
snip.find(|c: char|!c.is_whitespace())
.map(|pos| line_span.lo() + BytePos::from_usize(pos))
})
}
/// Returns the indentation of the line of a span
///
/// ```rust,ignore
/// let x = ();
/// // ^^ -- will return 0
/// let x = ();
/// // ^^ -- will return 4
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char|!c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
/// Returns `false` for spans whose snippet exists but is empty.
///
/// An empty snippet indicates an attribute that was inserted during macro
/// expansion, which we want to ignore because it could come from external
/// sources the user has no control over. For some reason these attributes
/// don't carry any expansion info, so we have to check it this way until
/// there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
    snippet_opt(cx, span).map_or(true, |snippet| !snippet.is_empty())
}
/// Returns the byte position just before the `->` return-type arrow,
/// skipping any whitespace that immediately precedes it.
///
/// ```rust,ignore
/// fn into(self) -> () {}
///              ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
///              ^
/// fn into3(self)     -> () {}
///              ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
    s.rfind("->").map(|rarrow| {
        let mut pos = rarrow;
        // Walk backwards over whitespace. The previous version indexed a
        // `Vec<char>` with a byte offset from `rfind`, which is wrong for
        // non-ASCII input; slicing by bytes and stepping by `len_utf8`
        // keeps the offsets consistent.
        while pos > 1 {
            match s[..pos].chars().next_back() {
                Some(c) if c.is_whitespace() => pos -= c.len_utf8(),
                _ => break,
            }
        }
        pos
    })
}
/// Reindent a multiline string with possibility of ignoring the first line.
///
/// Runs three passes — spaces, tabs, then spaces again — so input indented
/// with spaces, tabs, or a mix ends up at the requested indentation.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
    let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
    let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
    reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}
/// Reindents `s` to `indent` columns, treating `ch` as the indentation
/// character. The common leading indentation of all non-empty lines is
/// stripped first, then `indent` spaces are applied.
///
/// Note: the `x > indent` branch body was missing in this copy of the
/// file (truncated to `} else if x > indent | else {`); restored.
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
    // Smallest existing indentation (in `ch` characters) over all
    // non-empty lines, optionally ignoring the first line.
    let x = s
        .lines()
        .skip(usize::from(ignore_first))
        .filter_map(|l| {
            if l.is_empty() {
                None
            } else {
                // ignore empty lines
                Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
            }
        })
        .min()
        .unwrap_or(0);
    let indent = indent.unwrap_or(0);
    s.lines()
        .enumerate()
        .map(|(i, l)| {
            if (ignore_first && i == 0) || l.is_empty() {
                l.to_owned()
            } else if x > indent {
                // Dedent: drop the surplus leading characters.
                l.split_at(x - indent).1.to_owned()
            } else {
                // Indent: pad with spaces up to the requested level.
                " ".repeat(indent - x) + l
            }
        })
        .collect::<Vec<String>>()
        .join("\n")
}
/// Converts a span to a code snippet if available, otherwise returns the default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    if *applicability != Applicability::Unspecified && span.from_expansion() {
        *applicability = Applicability::MaybeIncorrect;
    }
    snippet_opt(cx, span).map_or_else(
        || {
            // No snippet available: the caller will see `default`, so a
            // machine-applicable suggestion would emit placeholder text.
            if *applicability == Applicability::MachineApplicable {
                *applicability = Applicability::HasPlaceholders;
            }
            Cow::Borrowed(default)
        },
        From::from,
    )
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let snip = snippet(cx, span, default);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
applicability: &mut Applicability,
) -> Cow<'a, str> {
let snip = snippet_with_applicability(cx, span, default, applicability);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applicability,
) -> (Cow<'a, str>, bool) {
let (span, is_macro_call) = walk_span_to_context(span, outer).map_or_else(
|| {
// The span is from a macro argument, and the outer context is the macro using the argument
if *applicability!= Applicability::Unspecified {
*applicability = Applicability::MaybeIncorrect;
}
// TODO: get the argument span.
(span, false)
},
|outer_span| (outer_span, span.ctxt()!= outer),
);
(
snippet_with_applicability(cx, span, default, applicability),
is_macro_call,
)
}
/// Walks the span up to the target context, thereby returning the macro call site if the span is
/// inside a macro expansion, or the original span if it is not. Note this will return `None` in the
/// case of the span being in a macro expansion, but the target context is from expanding a macro
/// argument.
///
/// Given the following
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { f($e) }; }
/// g(m!(0))
/// ```
///
/// If called with a span of the call to `f` and a context of the call to `g` this will return a
/// span containing `m!(0)`. However, if called with a span of the literal `0` this will give a span
/// containing `0` as the context is the same as the outer context.
///
/// This will traverse through multiple macro calls. Given the following:
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { n!($e, 0) }; }
/// macro_rules! n { ($e:expr, $f:expr) => { f($e, $f) }; }
/// g(m!(0))
/// ```
///
/// If called with a span of the call to `f` and a context of the call to `g` this will return a
/// span containing `m!(0)`.
pub fn walk_span_to_context(span: Span, outer: SyntaxContext) -> Option<Span> {
let outer_span = hygiene::walk_chain(span, outer);
(outer_span.ctxt() == outer).then(|| outer_span)
}
/// Removes block comments from the given `Vec` of lines.
///
/// Any line containing a `/*` or `*/` marker is dropped, and lines inside
/// an open block comment are dropped as well.
///
/// # Examples
///
/// ```rust,ignore
/// without_block_comments(vec!["/*", "foo", "*/"]);
/// // => vec![]
///
/// without_block_comments(vec!["bar", "/*", "foo", "*/"]);
/// // => vec!["bar"]
/// ```
pub fn without_block_comments(lines: Vec<&str>) -> Vec<&str> {
    let mut without = vec![];

    // Current block-comment nesting depth.
    let mut nest_level = 0_usize;

    for line in lines {
        let opens = line.matches("/*").count();
        let closes = line.matches("*/").count();
        if opens > 0 || closes > 0 {
            // The previous version only reacted to the *first* marker per
            // line, so a line such as `/* one-liner */` left the nesting
            // depth wrong for every following line. Count both marker
            // kinds instead; saturate so stray `*/` cannot underflow.
            nest_level += opens;
            nest_level = nest_level.saturating_sub(closes);
            continue;
        }

        if nest_level == 0 {
            without.push(line);
        }
    }

    without
}
#[cfg(test)]
mod test {
use super::{reindent_multiline, without_block_comments};
#[test]
fn test_reindent_multiline_single_line() {
assert_eq!("", reindent_multiline("".into(), false, None));
assert_eq!("...", reindent_multiline("...".into(), false, None));
assert_eq!("...", reindent_multiline(" ...".into(), false, None));
assert_eq!("...", reindent_multiline("\t...".into(), false, None));
assert_eq!("...", reindent_multiline("\t\t...".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_block() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
assert_eq!("\
if x {
\ty
} else {
\tz
}", reindent_multiline(" if x {
\ty
} else {
\tz
}".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_empty_line() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_lines_deeper() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline("\
if x {
y
} else {
z
}".into(), true, Some(8)));
}
#[test]
fn test_without_block_comments_lines_without_block_comments() {
let result = without_block_comments(vec!["/*", "", "*/"]);
println!("result: {:?}", result);
assert!(result.is_empty());
let result = without_block_comments(vec!["", "/*", "", "*/", "#[crate_type = \"lib\"]", "/*", "", "*/", ""]);
assert_eq!(result, vec!["", "#[crate_type = \"lib\"]", ""]);
let result = without_block_comments(vec!["/* rust", "", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* one-line comment */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested", "/* multi-line", "comment", "*/", "test", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested /* inline /* comment */ test */ */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["foo", "bar", "baz"]);
assert_eq!(result, vec!["foo", "bar", "baz"]);
}
}
| {
l.split_at(x - indent).1.to_owned()
} | conditional_block |
source.rs | //! Utils for extracting, inspecting or transforming source code
#![allow(clippy::module_name_repetitions)]
use crate::line_span;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LintContext};
use rustc_span::hygiene;
use rustc_span::{BytePos, Pos, Span, SyntaxContext};
use std::borrow::Cow;
/// Like `snippet_block`, but add braces if the expr is not an `ExprKind::Block`.
/// Also takes an `Option<String>` which can be put inside the braces.
pub fn expr_block<'a, T: LintContext>(
cx: &T,
expr: &Expr<'_>,
option: Option<String>,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let code = snippet_block(cx, expr.span, default, indent_relative_to);
let string = option.unwrap_or_default();
if expr.span.from_expansion() {
Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
} else if let ExprKind::Block(_, _) = expr.kind {
Cow::Owned(format!("{}{}", code, string))
} else if string.is_empty() {
Cow::Owned(format!("{{ {} }}", code))
} else {
Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
}
}
/// Returns a new Span that extends the original Span to the first non-whitespace char of the first
/// line.
///
/// ```rust,ignore
/// let x = ();
/// // ^^
/// // will be converted to
/// let x = ();
/// // ^^^^^^^^^^
/// ```
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
first_char_in_first_line(cx, span).map_or(span, |first_char_pos| span.with_lo(first_char_pos))
}
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
let line_span = line_span(cx, span);
snippet_opt(cx, line_span).and_then(|snip| {
snip.find(|c: char|!c.is_whitespace())
.map(|pos| line_span.lo() + BytePos::from_usize(pos))
})
}
/// Returns the indentation of the line of a span
///
/// ```rust,ignore
/// let x = ();
/// // ^^ -- will return 0
/// let x = ();
/// // ^^ -- will return 4
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char|!c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
if let Some(snippet) = snippet_opt(cx, span) {
if snippet.is_empty() {
return false;
}
}
true
}
/// Returns the byte position just before the `->` return-type arrow,
/// skipping any whitespace that immediately precedes it.
///
/// ```rust,ignore
/// fn into(self) -> () {}
///              ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
///              ^
/// fn into3(self)     -> () {}
///              ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
    s.rfind("->").map(|rarrow| {
        let mut pos = rarrow;
        // Walk backwards over whitespace. The previous version indexed a
        // `Vec<char>` with a byte offset from `rfind`, which is wrong for
        // non-ASCII input; slicing by bytes and stepping by `len_utf8`
        // keeps the offsets consistent.
        while pos > 1 {
            match s[..pos].chars().next_back() {
                Some(c) if c.is_whitespace() => pos -= c.len_utf8(),
                _ => break,
            }
        }
        pos
    })
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
let s_space = reindent_multiline_inner(&s, ignore_first, indent,'');
let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
reindent_multiline_inner(&s_tab, ignore_first, indent,'').into()
}
/// Reindents `s` to `indent` columns, treating `ch` as the indentation
/// character. The common leading indentation of all non-empty lines is
/// stripped first, then `indent` spaces are applied.
///
/// Note: the function's closing brace was lost at the end of this copy
/// of the file; restored.
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
    // Smallest existing indentation (in `ch` characters) over all
    // non-empty lines, optionally ignoring the first line.
    let x = s
        .lines()
        .skip(usize::from(ignore_first))
        .filter_map(|l| {
            if l.is_empty() {
                None
            } else {
                // ignore empty lines
                Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
            }
        })
        .min()
        .unwrap_or(0);
    let indent = indent.unwrap_or(0);
    s.lines()
        .enumerate()
        .map(|(i, l)| {
            if (ignore_first && i == 0) || l.is_empty() {
                l.to_owned()
            } else if x > indent {
                // Dedent: drop the surplus leading characters.
                l.split_at(x - indent).1.to_owned()
            } else {
                // Indent: pad with spaces up to the requested level.
                " ".repeat(indent - x) + l
            }
        })
        .collect::<Vec<String>>()
        .join("\n")
}
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
applicability: &mut Applicability,
) -> Cow<'a, str> {
if *applicability!= Applicability::Unspecified && span.from_expansion() {
*applicability = Applicability::MaybeIncorrect;
}
snippet_opt(cx, span).map_or_else(
|| {
if *applicability == Applicability::MachineApplicable {
*applicability = Applicability::HasPlaceholders;
}
Cow::Borrowed(default)
},
From::from,
)
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let snip = snippet(cx, span, default);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
applicability: &mut Applicability,
) -> Cow<'a, str> {
let snip = snippet_with_applicability(cx, span, default, applicability);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applicability,
) -> (Cow<'a, str>, bool) {
let (span, is_macro_call) = walk_span_to_context(span, outer).map_or_else(
|| {
// The span is from a macro argument, and the outer context is the macro using the argument
if *applicability!= Applicability::Unspecified {
*applicability = Applicability::MaybeIncorrect;
}
// TODO: get the argument span.
(span, false)
},
|outer_span| (outer_span, span.ctxt()!= outer),
);
(
snippet_with_applicability(cx, span, default, applicability),
is_macro_call,
)
}
/// Walks the span up to the target context, thereby returning the macro call site if the span is
/// inside a macro expansion, or the original span if it is not. Note this will return `None` in the
/// case of the span being in a macro expansion, but the target context is from expanding a macro
/// argument.
///
/// Given the following
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { f($e) }; }
/// g(m!(0))
/// ```
///
/// If called with a span of the call to `f` and a context of the call to `g` this will return a
/// span containing `m!(0)`. However, if called with a span of the literal `0` this will give a span
/// containing `0` as the context is the same as the outer context.
///
/// This will traverse through multiple macro calls. Given the following:
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { n!($e, 0) }; }
/// macro_rules! n { ($e:expr, $f:expr) => { f($e, $f) }; }
/// g(m!(0))
/// ```
///
/// If called with a span of the call to `f` and a context of the call to `g` this will return a
/// span containing `m!(0)`.
pub fn walk_span_to_context(span: Span, outer: SyntaxContext) -> Option<Span> {
let outer_span = hygiene::walk_chain(span, outer);
(outer_span.ctxt() == outer).then(|| outer_span)
}
/// Removes block comments from the given `Vec` of lines.
///
/// Lines containing `/*` or `*/` adjust the nesting level and are dropped;
/// every other line is kept only while no block comment is open.
///
/// # Examples
///
/// ```rust,ignore
/// without_block_comments(vec!["/*", "foo", "*/"]);
/// // => vec![]
///
/// without_block_comments(vec!["bar", "/*", "foo", "*/"]);
/// // => vec!["bar"]
/// ```
pub fn without_block_comments(lines: Vec<&str>) -> Vec<&str> {
    let mut without = vec![];
    let mut nest_level = 0_usize;
    for line in lines {
        let opens = line.matches("/*").count();
        let closes = line.matches("*/").count();
        if opens > 0 || closes > 0 {
            // The previous `contains("/*")`-only check meant a single-line
            // `/* ... */` incremented the level without ever decrementing it,
            // silently swallowing every following line. Count both delimiters.
            nest_level += opens;
            nest_level = nest_level.saturating_sub(closes);
            continue;
        }
        if nest_level == 0 {
            without.push(line);
        }
    }
    without
}
#[cfg(test)]
mod test {
use super::{reindent_multiline, without_block_comments};
#[test]
fn test_reindent_multiline_single_line() {
assert_eq!("", reindent_multiline("".into(), false, None));
assert_eq!("...", reindent_multiline("...".into(), false, None));
assert_eq!("...", reindent_multiline(" ...".into(), false, None));
assert_eq!("...", reindent_multiline("\t...".into(), false, None));
assert_eq!("...", reindent_multiline("\t\t...".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_block() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
assert_eq!("\
if x {
\ty
} else {
\tz
}", reindent_multiline(" if x {
\ty
} else {
\tz
}".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_empty_line() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_lines_deeper() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline("\
if x {
y
} else {
z
}".into(), true, Some(8)));
}
#[test]
fn test_without_block_comments_lines_without_block_comments() {
let result = without_block_comments(vec!["/*", "", "*/"]);
println!("result: {:?}", result);
assert!(result.is_empty());
let result = without_block_comments(vec!["", "/*", "", "*/", "#[crate_type = \"lib\"]", "/*", "", "*/", ""]);
assert_eq!(result, vec!["", "#[crate_type = \"lib\"]", ""]);
let result = without_block_comments(vec!["/* rust", "", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* one-line comment */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested", "/* multi-line", "comment", "*/", "test", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested /* inline /* comment */ test */ */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["foo", "bar", "baz"]);
assert_eq!(result, vec!["foo", "bar", "baz"]);
}
} | }
| random_line_split |
source.rs | //! Utils for extracting, inspecting or transforming source code
#![allow(clippy::module_name_repetitions)]
use crate::line_span;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LintContext};
use rustc_span::hygiene;
use rustc_span::{BytePos, Pos, Span, SyntaxContext};
use std::borrow::Cow;
/// Like `snippet_block`, but add braces if the expr is not an `ExprKind::Block`.
/// Also takes an `Option<String>` which can be put inside the braces.
pub fn | <'a, T: LintContext>(
cx: &T,
expr: &Expr<'_>,
option: Option<String>,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let code = snippet_block(cx, expr.span, default, indent_relative_to);
let string = option.unwrap_or_default();
if expr.span.from_expansion() {
Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
} else if let ExprKind::Block(_, _) = expr.kind {
Cow::Owned(format!("{}{}", code, string))
} else if string.is_empty() {
Cow::Owned(format!("{{ {} }}", code))
} else {
Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
}
}
/// Returns a new `Span` extended backwards to the first non-whitespace
/// character of its starting line.
///
/// ```rust,ignore
/// let x = ();
/// //      ^^
/// // will be converted to
/// let x = ();
/// // ^^^^^^^^^^
/// ```
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
    match first_char_in_first_line(cx, span) {
        Some(first_char_pos) => span.with_lo(first_char_pos),
        // No snippet available for the line: leave the span untouched.
        None => span,
    }
}
/// Returns the position of the first non-whitespace character on the line
/// containing `span`, or `None` if the line's snippet is unavailable.
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
    let line = line_span(cx, span);
    let snip = snippet_opt(cx, line)?;
    let offset = snip.find(|c: char| !c.is_whitespace())?;
    Some(line.lo() + BytePos::from_usize(offset))
}
/// Returns the indentation of the line of a span.
///
/// ```rust,ignore
/// let x = ();
/// // ^^ -- will return 0
///     let x = ();
/// //      ^^ -- will return 4
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
    let line = snippet_opt(cx, line_span(cx, span))?;
    line.find(|c: char| !c.is_whitespace())
}
/// Gets a snippet of the leading whitespace of the line containing `span`.
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
    let line = snippet_opt(cx, line_span(cx, span))?;
    // Whitespace is always a whole number of chars, so slicing at the
    // trimmed-prefix length stays on a char boundary.
    let ws_len = line.len() - line.trim_start().len();
    Some(line[..ws_len].to_owned())
}
/// Checks whether a span has any source text associated with it.
///
/// An empty snippet indicates an attribute inserted during macro expansion;
/// those are ignored because they may come from external sources the user has
/// no control over. For some reason such attributes carry no expansion info,
/// so until there is a better way this check is done via the snippet.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
    snippet_opt(cx, span).map_or(true, |snippet| !snippet.is_empty())
}
/// Returns the position just before the rarrow (`->`), skipping whitespace
/// that directly precedes it.
///
/// ```rust,ignore
/// fn into(self) -> () {}
///              ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
///               ^
/// fn into3(self)     -> () {}
///               ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
    s.rfind("->").map(|rarrow_pos| {
        // `rfind` returns a *byte* index; the previous implementation walked a
        // `Vec<char>` with it, which is wrong for strings containing
        // multi-byte characters. Walk the bytes instead.
        let bytes = s.as_bytes();
        let mut pos = rarrow_pos;
        // `pos > 1` kept from the original: the position never trims below 1.
        while pos > 1 && bytes[pos - 1].is_ascii_whitespace() {
            pos -= 1;
        }
        pos
    })
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
    // The char arguments had degenerated into empty char literals (`''`),
    // which is not valid Rust; restored to `' '`. Three passes (space, tab,
    // space) so mixed space/tab prefixes are fully normalized.
    let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
    let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
    reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}

/// Reindents `s` by the indent character `ch` (space or tab): strips the
/// shortest common leading run of `ch` over the considered non-empty lines,
/// then pads each re-indented line with `indent` spaces.
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
    // Smallest leading run of `ch` across the considered, non-empty lines.
    let min_prefix = s
        .lines()
        .skip(usize::from(ignore_first))
        .filter(|line| !line.is_empty()) // ignore empty lines
        .map(|line| {
            line.char_indices()
                .find(|&(_, c)| c != ch)
                .map_or(line.len(), |(pos, _)| pos)
        })
        .min()
        .unwrap_or(0);
    let indent = indent.unwrap_or(0);
    s.lines()
        .enumerate()
        .map(|(i, line)| {
            if (ignore_first && i == 0) || line.is_empty() {
                // Ignored first line and empty lines pass through unchanged.
                line.to_owned()
            } else if min_prefix > indent {
                // Over-indented: strip the excess prefix.
                line.split_at(min_prefix - indent).1.to_owned()
            } else {
                // Under-indented: pad with spaces.
                " ".repeat(indent - min_prefix) + line
            }
        })
        .collect::<Vec<String>>()
        .join("\n")
}
/// Converts a span to a code snippet if available, otherwise returns the default.
///
/// Useful for providing suggestions or, more generally, converting a given
/// `Span` to a `str`. For suggestions, consider [`snippet_with_applicability`]
/// so that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// //  ^^^^^   ^^^^^^^^^^
/// //  span1   span2
///
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
    match snippet_opt(cx, span) {
        Some(snip) => Cow::Owned(snip),
        None => Cow::Borrowed(default),
    }
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
///   `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    // A snippet taken from inside a macro expansion may not be valid at the
    // use site, so a previously auto-applicable fix is downgraded.
    if *applicability != Applicability::Unspecified && span.from_expansion() {
        *applicability = Applicability::MaybeIncorrect;
    }
    snippet_opt(cx, span).map_or_else(
        || {
            // No source text available: the suggestion will contain the
            // caller-provided placeholder, so it cannot be machine-applied.
            if *applicability == Applicability::MachineApplicable {
                *applicability = Applicability::HasPlaceholders;
            }
            Cow::Borrowed(default)
        },
        From::from,
    )
}
/// Same as `snippet`, but should only be used when it's clear that the input
/// span is not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
    let callsite = span.source_callsite();
    snippet(cx, callsite, default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
    let source_map = cx.sess().source_map();
    source_map.span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise
/// uses the default.
///
/// The code is trimmed of indentation, except for the first line — use it for
/// blocks or block-like things which need to be printed as such.
///
/// `indent_relative_to` optionally provides a span whose line indentation the
/// resulting snippet is re-indented to. The first line of the snippet always
/// has 0 indentation.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where `block` is the block of the if expr
///     if x {
///         y;
///     }
/// // will return the snippet
/// {
///     y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where `block` is the block of the if expr
///     if x {
///         y;
///     }
/// // will return the snippet
/// {
///     y;
/// } // aligned with `if`
/// ```
pub fn snippet_block<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
    let code = snippet(cx, span, default);
    let indent = match indent_relative_to {
        Some(relative_span) => indent_of(cx, relative_span),
        None => None,
    };
    reindent_multiline(code, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
    cx: &T,
    span: Span,
    default: &'a str,
    indent_relative_to: Option<Span>,
    applicability: &mut Applicability,
) -> Cow<'a, str> {
    let code = snippet_with_applicability(cx, span, default, applicability);
    let indent = match indent_relative_to {
        Some(relative_span) => indent_of(cx, relative_span),
        None => None,
    };
    reindent_multiline(code, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applicability,
) -> (Cow<'a, str>, bool) {
let (span, is_macro_call) = walk_span_to_context(span, outer).map_or_else(
|| {
// The span is from a macro argument, and the outer context is the macro using the argument
if *applicability!= Applicability::Unspecified {
*applicability = Applicability::MaybeIncorrect;
}
// TODO: get the argument span.
(span, false)
},
|outer_span| (outer_span, span.ctxt()!= outer),
);
(
snippet_with_applicability(cx, span, default, applicability),
is_macro_call,
)
}
/// Walks the span up to the target context, thereby returning the macro call
/// site if the span is inside a macro expansion, or the original span if it is
/// not. Returns `None` when the span is in a macro expansion but the target
/// context comes from expanding a macro *argument*.
///
/// Given the following
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { f($e) }; }
/// g(m!(0))
/// ```
///
/// called with the span of the call to `f` and the context of the call to `g`,
/// this returns a span containing `m!(0)`. Called with the span of the literal
/// `0`, it gives a span containing `0`, as the context equals the outer
/// context. Chains of multiple macro calls are traversed as well:
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { n!($e, 0) }; }
/// macro_rules! n { ($e:expr, $f:expr) => { f($e, $f) }; }
/// g(m!(0))
/// ```
///
/// still yields a span containing `m!(0)` for the call to `f`.
pub fn walk_span_to_context(span: Span, outer: SyntaxContext) -> Option<Span> {
    let outer_span = hygiene::walk_chain(span, outer);
    if outer_span.ctxt() == outer {
        Some(outer_span)
    } else {
        None
    }
}
/// Removes block comments from the given `Vec` of lines.
///
/// Lines containing `/*` or `*/` adjust the nesting level and are dropped;
/// every other line is kept only while no block comment is open.
///
/// # Examples
///
/// ```rust,ignore
/// without_block_comments(vec!["/*", "foo", "*/"]);
/// // => vec![]
///
/// without_block_comments(vec!["bar", "/*", "foo", "*/"]);
/// // => vec!["bar"]
/// ```
pub fn without_block_comments(lines: Vec<&str>) -> Vec<&str> {
    let mut without = vec![];
    let mut nest_level = 0_usize;
    for line in lines {
        let opens = line.matches("/*").count();
        let closes = line.matches("*/").count();
        if opens > 0 || closes > 0 {
            // The previous `contains("/*")`-only check meant a single-line
            // `/* ... */` incremented the level without ever decrementing it,
            // silently swallowing every following line. Count both delimiters.
            nest_level += opens;
            nest_level = nest_level.saturating_sub(closes);
            continue;
        }
        if nest_level == 0 {
            without.push(line);
        }
    }
    without
}
#[cfg(test)]
mod test {
// Unit tests for the two pure helpers in this file: `reindent_multiline`
// and `without_block_comments`. The snippet helpers need a `LintContext`
// and so cannot be unit-tested here.
// NOTE(review): the multi-line string literals in the `#[rustfmt::skip]`
// tests below appear to have lost their original leading indentation in an
// automated reformat -- verify the expected strings against the upstream
// source before relying on these expectations.
use super::{reindent_multiline, without_block_comments};
// Single-line inputs: common indentation (spaces or tabs) is stripped entirely.
#[test]
fn test_reindent_multiline_single_line() {
assert_eq!("", reindent_multiline("".into(), false, None));
assert_eq!("...", reindent_multiline("...".into(), false, None));
assert_eq!("...", reindent_multiline(" ...".into(), false, None));
assert_eq!("...", reindent_multiline("\t...".into(), false, None));
assert_eq!("...", reindent_multiline("\t\t...".into(), false, None));
}
// Multi-line blocks: the shared prefix is removed, inner tabs are preserved.
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_block() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
assert_eq!("\
if x {
\ty
} else {
\tz
}", reindent_multiline(" if x {
\ty
} else {
\tz
}".into(), false, None));
}
// Empty lines must not affect the computed minimum indentation.
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_empty_line() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
}
// With `ignore_first` and an explicit target indent, all but the first line
// are re-indented relative to that target.
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_lines_deeper() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline("\
if x {
y
} else {
z
}".into(), true, Some(8)));
}
// Comment-delimiter lines themselves are always dropped; only lines outside
// any open block comment survive.
#[test]
fn test_without_block_comments_lines_without_block_comments() {
let result = without_block_comments(vec!["/*", "", "*/"]);
println!("result: {:?}", result);
assert!(result.is_empty());
let result = without_block_comments(vec!["", "/*", "", "*/", "#[crate_type = \"lib\"]", "/*", "", "*/", ""]);
assert_eq!(result, vec!["", "#[crate_type = \"lib\"]", ""]);
let result = without_block_comments(vec!["/* rust", "", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* one-line comment */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested", "/* multi-line", "comment", "*/", "test", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested /* inline /* comment */ test */ */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["foo", "bar", "baz"]);
assert_eq!(result, vec!["foo", "bar", "baz"]);
}
}
| expr_block | identifier_name |
source.rs | //! Utils for extracting, inspecting or transforming source code
#![allow(clippy::module_name_repetitions)]
use crate::line_span;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind};
use rustc_lint::{LateContext, LintContext};
use rustc_span::hygiene;
use rustc_span::{BytePos, Pos, Span, SyntaxContext};
use std::borrow::Cow;
/// Like `snippet_block`, but add braces if the expr is not an `ExprKind::Block`.
/// Also takes an `Option<String>` which can be put inside the braces.
pub fn expr_block<'a, T: LintContext>(
cx: &T,
expr: &Expr<'_>,
option: Option<String>,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let code = snippet_block(cx, expr.span, default, indent_relative_to);
let string = option.unwrap_or_default();
if expr.span.from_expansion() {
Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
} else if let ExprKind::Block(_, _) = expr.kind {
Cow::Owned(format!("{}{}", code, string))
} else if string.is_empty() {
Cow::Owned(format!("{{ {} }}", code))
} else {
Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
}
}
/// Returns a new Span that extends the original Span to the first non-whitespace char of the first
/// line.
///
/// ```rust,ignore
/// let x = ();
/// // ^^
/// // will be converted to
/// let x = ();
/// // ^^^^^^^^^^
/// ```
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
first_char_in_first_line(cx, span).map_or(span, |first_char_pos| span.with_lo(first_char_pos))
}
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
let line_span = line_span(cx, span);
snippet_opt(cx, line_span).and_then(|snip| {
snip.find(|c: char|!c.is_whitespace())
.map(|pos| line_span.lo() + BytePos::from_usize(pos))
})
}
/// Returns the indentation of the line of a span
///
/// ```rust,ignore
/// let x = ();
/// // ^^ -- will return 0
/// let x = ();
/// // ^^ -- will return 4
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char|!c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
if let Some(snippet) = snippet_opt(cx, span) {
if snippet.is_empty() {
return false;
}
}
true
}
/// Returns the positon just before rarrow
///
/// ```rust,ignore
/// fn into(self) -> () {}
/// ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
/// ^
/// fn into3(self) -> () {}
/// ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
s.rfind("->").map(|rpos| {
let mut rpos = rpos;
let chars: Vec<char> = s.chars().collect();
while rpos > 1 {
if let Some(c) = chars.get(rpos - 1) {
if c.is_whitespace() {
rpos -= 1;
continue;
}
}
break;
}
rpos
})
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
    // The char arguments had degenerated into empty char literals (`''`),
    // which is not valid Rust; restored to `' '`. Three passes (space, tab,
    // space) so mixed space/tab prefixes are fully normalized.
    let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
    let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
    reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}

/// Reindents `s` by the indent character `ch` (space or tab): strips the
/// shortest common leading run of `ch` over the considered non-empty lines,
/// then pads each re-indented line with `indent` spaces.
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
    // Smallest leading run of `ch` across the considered, non-empty lines.
    let x = s
        .lines()
        .skip(usize::from(ignore_first))
        .filter_map(|l| {
            if l.is_empty() {
                // ignore empty lines when computing the minimum indentation
                None
            } else {
                Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
            }
        })
        .min()
        .unwrap_or(0);
    let indent = indent.unwrap_or(0);
    s.lines()
        .enumerate()
        .map(|(i, l)| {
            if (ignore_first && i == 0) || l.is_empty() {
                l.to_owned()
            } else if x > indent {
                // Over-indented: strip the excess prefix.
                l.split_at(x - indent).1.to_owned()
            } else {
                // Under-indented: pad with spaces.
                " ".repeat(indent - x) + l
            }
        })
        .collect::<Vec<String>>()
        .join("\n")
}
/// Converts a span to a code snippet if available, otherwise returns the default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
applicability: &mut Applicability,
) -> Cow<'a, str> |
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let snip = snippet(cx, span, default);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
applicability: &mut Applicability,
) -> Cow<'a, str> {
let snip = snippet_with_applicability(cx, span, default, applicability);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applicability,
) -> (Cow<'a, str>, bool) {
let (span, is_macro_call) = walk_span_to_context(span, outer).map_or_else(
|| {
// The span is from a macro argument, and the outer context is the macro using the argument
if *applicability!= Applicability::Unspecified {
*applicability = Applicability::MaybeIncorrect;
}
// TODO: get the argument span.
(span, false)
},
|outer_span| (outer_span, span.ctxt()!= outer),
);
(
snippet_with_applicability(cx, span, default, applicability),
is_macro_call,
)
}
/// Walks the span up to the target context, thereby returning the macro call site if the span is
/// inside a macro expansion, or the original span if it is not. Note this will return `None` in the
/// case of the span being in a macro expansion, but the target context is from expanding a macro
/// argument.
///
/// Given the following
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { f($e) }; }
/// g(m!(0))
/// ```
///
/// If called with a span of the call to `f` and a context of the call to `g` this will return a
/// span containing `m!(0)`. However, if called with a span of the literal `0` this will give a span
/// containing `0` as the context is the same as the outer context.
///
/// This will traverse through multiple macro calls. Given the following:
///
/// ```rust,ignore
/// macro_rules! m { ($e:expr) => { n!($e, 0) }; }
/// macro_rules! n { ($e:expr, $f:expr) => { f($e, $f) }; }
/// g(m!(0))
/// ```
///
/// If called with a span of the call to `f` and a context of the call to `g` this will return a
/// span containing `m!(0)`.
pub fn walk_span_to_context(span: Span, outer: SyntaxContext) -> Option<Span> {
let outer_span = hygiene::walk_chain(span, outer);
(outer_span.ctxt() == outer).then(|| outer_span)
}
/// Removes block comments from the given `Vec` of lines.
///
/// Lines containing `/*` or `*/` adjust the nesting level and are dropped;
/// every other line is kept only while no block comment is open.
///
/// # Examples
///
/// ```rust,ignore
/// without_block_comments(vec!["/*", "foo", "*/"]);
/// // => vec![]
///
/// without_block_comments(vec!["bar", "/*", "foo", "*/"]);
/// // => vec!["bar"]
/// ```
pub fn without_block_comments(lines: Vec<&str>) -> Vec<&str> {
    let mut without = vec![];
    let mut nest_level = 0_usize;
    for line in lines {
        let opens = line.matches("/*").count();
        let closes = line.matches("*/").count();
        if opens > 0 || closes > 0 {
            // The previous `contains("/*")`-only check meant a single-line
            // `/* ... */` incremented the level without ever decrementing it,
            // silently swallowing every following line. Count both delimiters.
            nest_level += opens;
            nest_level = nest_level.saturating_sub(closes);
            continue;
        }
        if nest_level == 0 {
            without.push(line);
        }
    }
    without
}
#[cfg(test)]
mod test {
use super::{reindent_multiline, without_block_comments};
#[test]
fn test_reindent_multiline_single_line() {
assert_eq!("", reindent_multiline("".into(), false, None));
assert_eq!("...", reindent_multiline("...".into(), false, None));
assert_eq!("...", reindent_multiline(" ...".into(), false, None));
assert_eq!("...", reindent_multiline("\t...".into(), false, None));
assert_eq!("...", reindent_multiline("\t\t...".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_block() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
assert_eq!("\
if x {
\ty
} else {
\tz
}", reindent_multiline(" if x {
\ty
} else {
\tz
}".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_empty_line() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline(" if x {
y
} else {
z
}".into(), false, None));
}
#[test]
#[rustfmt::skip]
fn test_reindent_multiline_lines_deeper() {
assert_eq!("\
if x {
y
} else {
z
}", reindent_multiline("\
if x {
y
} else {
z
}".into(), true, Some(8)));
}
#[test]
fn test_without_block_comments_lines_without_block_comments() {
let result = without_block_comments(vec!["/*", "", "*/"]);
println!("result: {:?}", result);
assert!(result.is_empty());
let result = without_block_comments(vec!["", "/*", "", "*/", "#[crate_type = \"lib\"]", "/*", "", "*/", ""]);
assert_eq!(result, vec!["", "#[crate_type = \"lib\"]", ""]);
let result = without_block_comments(vec!["/* rust", "", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* one-line comment */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested", "/* multi-line", "comment", "*/", "test", "*/"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["/* nested /* inline /* comment */ test */ */"]);
assert!(result.is_empty());
let result = without_block_comments(vec!["foo", "bar", "baz"]);
assert_eq!(result, vec!["foo", "bar", "baz"]);
}
}
| {
if *applicability != Applicability::Unspecified && span.from_expansion() {
*applicability = Applicability::MaybeIncorrect;
}
snippet_opt(cx, span).map_or_else(
|| {
if *applicability == Applicability::MachineApplicable {
*applicability = Applicability::HasPlaceholders;
}
Cow::Borrowed(default)
},
From::from,
)
} | identifier_body |
spi_host.rs | use crate::hil::spi_host::SpiHost;
use core::cell::Cell;
use core::cmp::min;
use kernel::common::cells::{OptionalCell, TakeCell};
use kernel::common::registers::{register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly};
use kernel::common::StaticRef;
use kernel::hil::spi::{ClockPolarity, ClockPhase, SpiMaster, SpiMasterClient};
use kernel::ReturnCode;
// The TX and RX FIFOs both have the same length. We write and read at the same
// time.
// Registers for the SPI host controller
register_structs! {
Registers {
(0x0000 => ctrl: ReadWrite<u32, CTRL::Register>),
(0x0004 => xact: ReadWrite<u32, XACT::Register>),
(0x0008 => ictrl: ReadWrite<u32, ICTRL::Register>),
(0x000c => istate: ReadOnly<u32, ISTATE::Register>),
(0x0010 => istate_clr: ReadWrite<u32, ISTATE_CLR::Register>),
(0x0014 => _reserved),
(0x1000 => tx_fifo: [WriteOnly<u8>; 128]),
(0x1080 => rx_fifo: [ReadOnly<u8>; 128]),
(0x1100 => @END),
}
}
register_bitfields![u32,
CTRL [ | CSBSU OFFSET(2) NUMBITS(4) [],
/// CSB from SCK hold time in SCK cycles + 1 (defined with respect to
/// the last SCK edge)
CSBHLD OFFSET(6) NUMBITS(4) [],
/// SPI Clk Divider. Actual divider is IDIV+1. A value of 0 gives divide
/// by 1 clock, 1 gives divide by 2 etc.
IDIV OFFSET(10) NUMBITS(12) [],
/// Polarity of CSB signal. 0:active low 1:active high
CSBPOL OFFSET(22) NUMBITS(1) [],
/// Order in which bits of byte are sent. 0: send bit 0 first. 1: send
/// bit 7 first
TXBITOR OFFSET(23) NUMBITS(1) [],
/// Order in which bytes of buffer word are sent.
/// 0: send byte 0 first. 1: send byte 3 first
TXBYTOR OFFSET(24) NUMBITS(1) [],
/// Order in which received bits are packed into byte.
/// 0: first bit received is bit0 1: last bit received is bit 0
RXBITOR OFFSET(25) NUMBITS(1) [],
/// Order in which received bytes are packed into word.
/// 0: first byte received is byte 0 1: first byte received is byte 3
RXBYTOR OFFSET(26) NUMBITS(1) [],
/// SPI Passthrough Mode. 0: Disable, 1: Enable. This is the host side
/// control of whether passthrough is allowed. In order for full
/// passthrough functionality, both the host and device passthrough
/// functionality have to be enabled
ENPASSTHRU OFFSET(27) NUMBITS(1) []
],
XACT [
/// Initiate transaction in buffer
START OFFSET(0) NUMBITS(1) [],
/// Bits-1 in last byte transferred. The default assumes last byte will
/// have 8 bits, this should be sufficient for most usage.
BCNT OFFSET(1) NUMBITS(3) [],
/// Total number of transactions in bytes-1. If 64 bytes are to be
/// transferred, this should be programmed as 63.
SIZE OFFSET(4) NUMBITS(7) [],
/// Poll for ready
RDY_POLL OFFSET(11) NUMBITS(1) [],
/// Delay before polling in PCLK cycles + 1
RDY_POLL_DLY OFFSET(12) NUMBITS(5) []
],
ICTRL [
/// TX interrupt enable
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE [
/// TX done interrupt
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE_CLR [
/// TX done interrupt clear
TXDONE OFFSET(0) NUMBITS(1) []
]
];
const SPI_HOST0_BASE_ADDR: u32 = 0x4070_0000;
const SPI_HOST1_BASE_ADDR: u32 = 0x4071_0000;
const SPI_HOST0_REGISTERS: StaticRef<Registers> =
unsafe { StaticRef::new(SPI_HOST0_BASE_ADDR as *const Registers) };
const SPI_HOST1_REGISTERS: StaticRef<Registers> =
unsafe { StaticRef::new(SPI_HOST1_BASE_ADDR as *const Registers) };
pub static mut SPI_HOST0: SpiHostHardware = SpiHostHardware::new(SPI_HOST0_REGISTERS);
pub static mut SPI_HOST1: SpiHostHardware = SpiHostHardware::new(SPI_HOST1_REGISTERS);
/// A SPI Host
pub struct SpiHostHardware {
registers: StaticRef<Registers>,
transaction_len: Cell<usize>,
tx_buffer: TakeCell<'static, [u8]>,
rx_buffer: TakeCell<'static, [u8]>,
client: OptionalCell<&'static dyn SpiMasterClient>,
}
impl SpiHostHardware {
const fn new(base_addr: StaticRef<Registers>) -> SpiHostHardware {
SpiHostHardware {
registers: base_addr,
transaction_len: Cell::new(0),
tx_buffer: TakeCell::empty(),
rx_buffer: TakeCell::empty(),
client: OptionalCell::empty(),
}
}
pub fn init(&self) {
self.registers.ctrl.write(
CTRL::CPOL::CLEAR +
CTRL::CPHA::CLEAR +
CTRL::CSBSU::CLEAR +
CTRL::CSBHLD::CLEAR +
CTRL::IDIV.val(2) +
CTRL::CSBPOL::CLEAR +
CTRL::TXBITOR::SET +
CTRL::TXBYTOR::CLEAR +
CTRL::RXBITOR::SET +
CTRL::RXBYTOR::CLEAR +
CTRL::ENPASSTHRU::CLEAR);
self.registers.xact.write(
XACT::START::CLEAR +
XACT::BCNT.val(7) +
XACT::SIZE.val(0) +
XACT::RDY_POLL::CLEAR +
XACT::RDY_POLL_DLY.val(0));
}
fn enable_tx_interrupt(&self) {
self.registers.ictrl.modify(ICTRL::TXDONE::SET);
}
fn disable_tx_interrupt(&self) {
self.registers.ictrl.modify(ICTRL::TXDONE::CLEAR);
}
pub fn handle_interrupt(&self) {
//debug!("SpiHostHardware::handle_interrupt: ISTATE = {:08x}", self.registers.istate.get());
if self.registers.istate.is_set(ISTATE::TXDONE) {
self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
self.client.map(|client| {
self.tx_buffer.take()
.map(|tx_buf| {
self.rx_buffer
.map(|rx_buf| {
self.read_data(rx_buf);
});
client.read_write_done(
tx_buf,
self.rx_buffer.take(),
self.transaction_len.get())
});
});
}
self.disable_tx_interrupt();
}
fn start_transaction(
&self,
write_buffer: Option<&'static mut [u8]>,
read_buffer: Option<&'static mut [u8]>,
transaction_len: usize) -> ReturnCode {
//debug!("SpiHostHardware::start_transaction: transaction_len={}", transaction_len);
// The transaction needs at least one byte.
// It also cannot have more bytes than tx_fifo or rx_fifo is long.
if (transaction_len == 0) ||
(transaction_len > self.registers.tx_fifo.len()) ||
(transaction_len > self.registers.rx_fifo.len()) {
//debug!("SpiHostHardware::start_transaction: Invalid transaction_len={}", transaction_len);
return ReturnCode::ESIZE;
}
self.registers.xact.modify(XACT::BCNT.val(7));
self.registers.xact.modify(XACT::SIZE.val((transaction_len - 1) as u32));
let mut tx_buf_len = 0;
write_buffer.as_ref().map(|tx_buf| {
tx_buf_len = min(tx_buf.len(), transaction_len);
for idx in 0..tx_buf_len {
self.registers.tx_fifo[idx].set(tx_buf[idx]);
}
});
// Clear the TX FIFO for additional bytes not supplied by write_buffer.
// Since we have no control over how many bytes the SPI host reads, we
// want to make sure to not accidentally leak information that made it
// into the TX FIFO beyond the length of the `write_buffer`.
for idx in tx_buf_len..transaction_len {
self.registers.tx_fifo[idx].set(0xff);
}
write_buffer.map(|buf| {
self.tx_buffer.replace(buf);
});
read_buffer.map(|buf| {
self.rx_buffer.replace(buf);
});
self.transaction_len.set(transaction_len);
self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
self.enable_tx_interrupt();
self.registers.xact.modify(XACT::START::SET);
ReturnCode::SUCCESS
}
fn read_data(&self, read_buffer: &mut [u8]) {
let read_len = min(read_buffer.len(), self.transaction_len.get());
for idx in 0..read_len {
let val = self.registers.rx_fifo[idx].get();
read_buffer[idx] = val;
}
}
}
impl SpiHost for SpiHostHardware {
fn spi_device_spi_host_passthrough(&self, enabled: bool) {
self.registers.ctrl.modify(
if enabled { CTRL::ENPASSTHRU::SET } else { CTRL::ENPASSTHRU::CLEAR });
}
fn wait_busy_clear_in_transactions(&self, enabled: bool) {
self.registers.xact.modify(
if enabled { XACT::RDY_POLL::SET } else { XACT::RDY_POLL::CLEAR });
}
}
impl SpiMaster for SpiHostHardware {
type ChipSelect = bool;
fn set_client(&self, client: &'static dyn kernel::hil::spi::SpiMasterClient) {
self.client.set(client);
}
fn init(&self) {}
fn is_busy(&self) -> bool {
self.registers.istate.is_set(ISTATE::TXDONE)
}
fn read_write_bytes(
&self,
write_buffer: &'static mut [u8],
read_buffer: Option<&'static mut [u8]>,
len: usize,
) -> ReturnCode {
// If busy, don't start
if self.is_busy() {
return ReturnCode::EBUSY;
}
self.start_transaction(Some(write_buffer), read_buffer, len)
}
fn write_byte(&self, _val: u8) {
panic!("write_byte is not implemented");
}
fn read_byte(&self) -> u8 {
panic!("read_byte is not implemented");
}
fn read_write_byte(&self, _val: u8) -> u8 {
panic!("read_write_byte is not implemented");
}
fn specify_chip_select(&self, _cs: Self::ChipSelect) {
// Nothing to be done
}
/// Returns the actual rate set
fn set_rate(&self, _rate: u32) -> u32 {
panic!("set_rate is not implemented");
}
fn get_rate(&self) -> u32 {
panic!("get_rate is not implemented");
}
fn set_clock(&self, _polarity: ClockPolarity) {
panic!("set_clock is not implemented");
}
fn get_clock(&self) -> ClockPolarity {
panic!("get_clock is not implemented");
}
fn set_phase(&self, _phase: ClockPhase) {
panic!("set_phase is not implemented");
}
fn get_phase(&self) -> ClockPhase {
panic!("get_phase is not implemented");
}
fn hold_low(&self) {
panic!("hold_low is not implemented");
}
fn release_low(&self) {
// Nothing to do, since this is the only mode supported.
}
} | /// CPOL setting
CPOL OFFSET(0) NUMBITS(1) [],
/// CPHA setting
CPHA OFFSET(1) NUMBITS(1) [],
/// CSB to SCK setup time in SCK cycles + 1.5 | random_line_split |
spi_host.rs | use crate::hil::spi_host::SpiHost;
use core::cell::Cell;
use core::cmp::min;
use kernel::common::cells::{OptionalCell, TakeCell};
use kernel::common::registers::{register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly};
use kernel::common::StaticRef;
use kernel::hil::spi::{ClockPolarity, ClockPhase, SpiMaster, SpiMasterClient};
use kernel::ReturnCode;
// The TX and RX FIFOs both have the same length. We write and read at the same
// time.
// Registers for the SPI host controller
register_structs! {
Registers {
(0x0000 => ctrl: ReadWrite<u32, CTRL::Register>),
(0x0004 => xact: ReadWrite<u32, XACT::Register>),
(0x0008 => ictrl: ReadWrite<u32, ICTRL::Register>),
(0x000c => istate: ReadOnly<u32, ISTATE::Register>),
(0x0010 => istate_clr: ReadWrite<u32, ISTATE_CLR::Register>),
(0x0014 => _reserved),
(0x1000 => tx_fifo: [WriteOnly<u8>; 128]),
(0x1080 => rx_fifo: [ReadOnly<u8>; 128]),
(0x1100 => @END),
}
}
register_bitfields![u32,
CTRL [
/// CPOL setting
CPOL OFFSET(0) NUMBITS(1) [],
/// CPHA setting
CPHA OFFSET(1) NUMBITS(1) [],
/// CSB to SCK setup time in SCK cycles + 1.5
CSBSU OFFSET(2) NUMBITS(4) [],
/// CSB from SCK hold time in SCK cycles + 1 (defined with respect to
/// the last SCK edge)
CSBHLD OFFSET(6) NUMBITS(4) [],
/// SPI Clk Divider. Actual divider is IDIV+1. A value of 0 gives divide
/// by 1 clock, 1 gives divide by 2 etc.
IDIV OFFSET(10) NUMBITS(12) [],
/// Polarity of CSB signal. 0:active low 1:active high
CSBPOL OFFSET(22) NUMBITS(1) [],
/// Order in which bits of byte are sent. 0: send bit 0 first. 1: send
/// bit 7 first
TXBITOR OFFSET(23) NUMBITS(1) [],
/// Order in which bytes of buffer word are sent.
/// 0: send byte 0 first. 1: send byte 3 first
TXBYTOR OFFSET(24) NUMBITS(1) [],
/// Order in which received bits are packed into byte.
/// 0: first bit received is bit0 1: last bit received is bit 0
RXBITOR OFFSET(25) NUMBITS(1) [],
/// Order in which received bytes are packed into word.
/// 0: first byte received is byte 0 1: first byte received is byte 3
RXBYTOR OFFSET(26) NUMBITS(1) [],
/// SPI Passthrough Mode. 0: Disable, 1: Enable. This is the host side
/// control of whether passthrough is allowed. In order for full
/// passthrough functionality, both the host and device passthrough
/// functionality have to be enabled
ENPASSTHRU OFFSET(27) NUMBITS(1) []
],
XACT [
/// Initiate transaction in buffer
START OFFSET(0) NUMBITS(1) [],
/// Bits-1 in last byte transferred. The default assumes last byte will
/// have 8 bits, this should be sufficient for most usage.
BCNT OFFSET(1) NUMBITS(3) [],
/// Total number of transactions in bytes-1. If 64 bytes are to be
/// transferred, this should be programmed as 63.
SIZE OFFSET(4) NUMBITS(7) [],
/// Poll for ready
RDY_POLL OFFSET(11) NUMBITS(1) [],
/// Delay before polling in PCLK cycles + 1
RDY_POLL_DLY OFFSET(12) NUMBITS(5) []
],
ICTRL [
/// TX interrupt enable
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE [
/// TX done interrupt
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE_CLR [
/// TX done interrupt clear
TXDONE OFFSET(0) NUMBITS(1) []
]
];
const SPI_HOST0_BASE_ADDR: u32 = 0x4070_0000;
const SPI_HOST1_BASE_ADDR: u32 = 0x4071_0000;
const SPI_HOST0_REGISTERS: StaticRef<Registers> =
unsafe { StaticRef::new(SPI_HOST0_BASE_ADDR as *const Registers) };
const SPI_HOST1_REGISTERS: StaticRef<Registers> =
unsafe { StaticRef::new(SPI_HOST1_BASE_ADDR as *const Registers) };
pub static mut SPI_HOST0: SpiHostHardware = SpiHostHardware::new(SPI_HOST0_REGISTERS);
pub static mut SPI_HOST1: SpiHostHardware = SpiHostHardware::new(SPI_HOST1_REGISTERS);
/// A SPI Host
pub struct SpiHostHardware {
registers: StaticRef<Registers>,
transaction_len: Cell<usize>,
tx_buffer: TakeCell<'static, [u8]>,
rx_buffer: TakeCell<'static, [u8]>,
client: OptionalCell<&'static dyn SpiMasterClient>,
}
impl SpiHostHardware {
const fn new(base_addr: StaticRef<Registers>) -> SpiHostHardware {
SpiHostHardware {
registers: base_addr,
transaction_len: Cell::new(0),
tx_buffer: TakeCell::empty(),
rx_buffer: TakeCell::empty(),
client: OptionalCell::empty(),
}
}
pub fn init(&self) {
self.registers.ctrl.write(
CTRL::CPOL::CLEAR +
CTRL::CPHA::CLEAR +
CTRL::CSBSU::CLEAR +
CTRL::CSBHLD::CLEAR +
CTRL::IDIV.val(2) +
CTRL::CSBPOL::CLEAR +
CTRL::TXBITOR::SET +
CTRL::TXBYTOR::CLEAR +
CTRL::RXBITOR::SET +
CTRL::RXBYTOR::CLEAR +
CTRL::ENPASSTHRU::CLEAR);
self.registers.xact.write(
XACT::START::CLEAR +
XACT::BCNT.val(7) +
XACT::SIZE.val(0) +
XACT::RDY_POLL::CLEAR +
XACT::RDY_POLL_DLY.val(0));
}
fn enable_tx_interrupt(&self) {
self.registers.ictrl.modify(ICTRL::TXDONE::SET);
}
fn disable_tx_interrupt(&self) {
self.registers.ictrl.modify(ICTRL::TXDONE::CLEAR);
}
pub fn handle_interrupt(&self) {
//debug!("SpiHostHardware::handle_interrupt: ISTATE = {:08x}", self.registers.istate.get());
if self.registers.istate.is_set(ISTATE::TXDONE) {
self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
self.client.map(|client| {
self.tx_buffer.take()
.map(|tx_buf| {
self.rx_buffer
.map(|rx_buf| {
self.read_data(rx_buf);
});
client.read_write_done(
tx_buf,
self.rx_buffer.take(),
self.transaction_len.get())
});
});
}
self.disable_tx_interrupt();
}
fn start_transaction(
&self,
write_buffer: Option<&'static mut [u8]>,
read_buffer: Option<&'static mut [u8]>,
transaction_len: usize) -> ReturnCode {
//debug!("SpiHostHardware::start_transaction: transaction_len={}", transaction_len);
// The transaction needs at least one byte.
// It also cannot have more bytes than tx_fifo or rx_fifo is long.
if (transaction_len == 0) ||
(transaction_len > self.registers.tx_fifo.len()) ||
(transaction_len > self.registers.rx_fifo.len()) {
//debug!("SpiHostHardware::start_transaction: Invalid transaction_len={}", transaction_len);
return ReturnCode::ESIZE;
}
self.registers.xact.modify(XACT::BCNT.val(7));
self.registers.xact.modify(XACT::SIZE.val((transaction_len - 1) as u32));
let mut tx_buf_len = 0;
write_buffer.as_ref().map(|tx_buf| {
tx_buf_len = min(tx_buf.len(), transaction_len);
for idx in 0..tx_buf_len {
self.registers.tx_fifo[idx].set(tx_buf[idx]);
}
});
// Clear the TX FIFO for additional bytes not supplied by write_buffer.
// Since we have no control over how many bytes the SPI host reads, we
// want to make sure to not accidentally leak information that made it
// into the TX FIFO beyond the length of the `write_buffer`.
for idx in tx_buf_len..transaction_len {
self.registers.tx_fifo[idx].set(0xff);
}
write_buffer.map(|buf| {
self.tx_buffer.replace(buf);
});
read_buffer.map(|buf| {
self.rx_buffer.replace(buf);
});
self.transaction_len.set(transaction_len);
self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
self.enable_tx_interrupt();
self.registers.xact.modify(XACT::START::SET);
ReturnCode::SUCCESS
}
fn read_data(&self, read_buffer: &mut [u8]) {
let read_len = min(read_buffer.len(), self.transaction_len.get());
for idx in 0..read_len {
let val = self.registers.rx_fifo[idx].get();
read_buffer[idx] = val;
}
}
}
impl SpiHost for SpiHostHardware {
fn spi_device_spi_host_passthrough(&self, enabled: bool) {
self.registers.ctrl.modify(
if enabled { CTRL::ENPASSTHRU::SET } else { CTRL::ENPASSTHRU::CLEAR });
}
fn wait_busy_clear_in_transactions(&self, enabled: bool) {
self.registers.xact.modify(
if enabled { XACT::RDY_POLL::SET } else { XACT::RDY_POLL::CLEAR });
}
}
impl SpiMaster for SpiHostHardware {
type ChipSelect = bool;
fn set_client(&self, client: &'static dyn kernel::hil::spi::SpiMasterClient) {
self.client.set(client);
}
fn init(&self) {}
fn is_busy(&self) -> bool {
self.registers.istate.is_set(ISTATE::TXDONE)
}
fn read_write_bytes(
&self,
write_buffer: &'static mut [u8],
read_buffer: Option<&'static mut [u8]>,
len: usize,
) -> ReturnCode {
// If busy, don't start
if self.is_busy() {
return ReturnCode::EBUSY;
}
self.start_transaction(Some(write_buffer), read_buffer, len)
}
fn write_byte(&self, _val: u8) {
panic!("write_byte is not implemented");
}
fn read_byte(&self) -> u8 {
panic!("read_byte is not implemented");
}
fn read_write_byte(&self, _val: u8) -> u8 {
panic!("read_write_byte is not implemented");
}
fn specify_chip_select(&self, _cs: Self::ChipSelect) {
// Nothing to be done
}
/// Returns the actual rate set
fn set_rate(&self, _rate: u32) -> u32 |
fn get_rate(&self) -> u32 {
panic!("get_rate is not implemented");
}
fn set_clock(&self, _polarity: ClockPolarity) {
panic!("set_clock is not implemented");
}
fn get_clock(&self) -> ClockPolarity {
panic!("get_clock is not implemented");
}
fn set_phase(&self, _phase: ClockPhase) {
panic!("set_phase is not implemented");
}
fn get_phase(&self) -> ClockPhase {
panic!("get_phase is not implemented");
}
fn hold_low(&self) {
panic!("hold_low is not implemented");
}
fn release_low(&self) {
// Nothing to do, since this is the only mode supported.
}
}
| {
panic!("set_rate is not implemented");
} | identifier_body |
spi_host.rs | use crate::hil::spi_host::SpiHost;
use core::cell::Cell;
use core::cmp::min;
use kernel::common::cells::{OptionalCell, TakeCell};
use kernel::common::registers::{register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly};
use kernel::common::StaticRef;
use kernel::hil::spi::{ClockPolarity, ClockPhase, SpiMaster, SpiMasterClient};
use kernel::ReturnCode;
// The TX and RX FIFOs both have the same length. We write and read at the same
// time.
// Registers for the SPI host controller
register_structs! {
Registers {
(0x0000 => ctrl: ReadWrite<u32, CTRL::Register>),
(0x0004 => xact: ReadWrite<u32, XACT::Register>),
(0x0008 => ictrl: ReadWrite<u32, ICTRL::Register>),
(0x000c => istate: ReadOnly<u32, ISTATE::Register>),
(0x0010 => istate_clr: ReadWrite<u32, ISTATE_CLR::Register>),
(0x0014 => _reserved),
(0x1000 => tx_fifo: [WriteOnly<u8>; 128]),
(0x1080 => rx_fifo: [ReadOnly<u8>; 128]),
(0x1100 => @END),
}
}
register_bitfields![u32,
CTRL [
/// CPOL setting
CPOL OFFSET(0) NUMBITS(1) [],
/// CPHA setting
CPHA OFFSET(1) NUMBITS(1) [],
/// CSB to SCK setup time in SCK cycles + 1.5
CSBSU OFFSET(2) NUMBITS(4) [],
/// CSB from SCK hold time in SCK cycles + 1 (defined with respect to
/// the last SCK edge)
CSBHLD OFFSET(6) NUMBITS(4) [],
/// SPI Clk Divider. Actual divider is IDIV+1. A value of 0 gives divide
/// by 1 clock, 1 gives divide by 2 etc.
IDIV OFFSET(10) NUMBITS(12) [],
/// Polarity of CSB signal. 0:active low 1:active high
CSBPOL OFFSET(22) NUMBITS(1) [],
/// Order in which bits of byte are sent. 0: send bit 0 first. 1: send
/// bit 7 first
TXBITOR OFFSET(23) NUMBITS(1) [],
/// Order in which bytes of buffer word are sent.
/// 0: send byte 0 first. 1: send byte 3 first
TXBYTOR OFFSET(24) NUMBITS(1) [],
/// Order in which received bits are packed into byte.
/// 0: first bit received is bit0 1: last bit received is bit 0
RXBITOR OFFSET(25) NUMBITS(1) [],
/// Order in which received bytes are packed into word.
/// 0: first byte received is byte 0 1: first byte received is byte 3
RXBYTOR OFFSET(26) NUMBITS(1) [],
/// SPI Passthrough Mode. 0: Disable, 1: Enable. This is the host side
/// control of whether passthrough is allowed. In order for full
/// passthrough functionality, both the host and device passthrough
/// functionality have to be enabled
ENPASSTHRU OFFSET(27) NUMBITS(1) []
],
XACT [
/// Initiate transaction in buffer
START OFFSET(0) NUMBITS(1) [],
/// Bits-1 in last byte transferred. The default assumes last byte will
/// have 8 bits, this should be sufficient for most usage.
BCNT OFFSET(1) NUMBITS(3) [],
/// Total number of transactions in bytes-1. If 64 bytes are to be
/// transferred, this should be programmed as 63.
SIZE OFFSET(4) NUMBITS(7) [],
/// Poll for ready
RDY_POLL OFFSET(11) NUMBITS(1) [],
/// Delay before polling in PCLK cycles + 1
RDY_POLL_DLY OFFSET(12) NUMBITS(5) []
],
ICTRL [
/// TX interrupt enable
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE [
/// TX done interrupt
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE_CLR [
/// TX done interrupt clear
TXDONE OFFSET(0) NUMBITS(1) []
]
];
const SPI_HOST0_BASE_ADDR: u32 = 0x4070_0000;
const SPI_HOST1_BASE_ADDR: u32 = 0x4071_0000;
const SPI_HOST0_REGISTERS: StaticRef<Registers> =
unsafe { StaticRef::new(SPI_HOST0_BASE_ADDR as *const Registers) };
const SPI_HOST1_REGISTERS: StaticRef<Registers> =
unsafe { StaticRef::new(SPI_HOST1_BASE_ADDR as *const Registers) };
pub static mut SPI_HOST0: SpiHostHardware = SpiHostHardware::new(SPI_HOST0_REGISTERS);
pub static mut SPI_HOST1: SpiHostHardware = SpiHostHardware::new(SPI_HOST1_REGISTERS);
/// A SPI Host
pub struct SpiHostHardware {
registers: StaticRef<Registers>,
transaction_len: Cell<usize>,
tx_buffer: TakeCell<'static, [u8]>,
rx_buffer: TakeCell<'static, [u8]>,
client: OptionalCell<&'static dyn SpiMasterClient>,
}
impl SpiHostHardware {
const fn new(base_addr: StaticRef<Registers>) -> SpiHostHardware {
SpiHostHardware {
registers: base_addr,
transaction_len: Cell::new(0),
tx_buffer: TakeCell::empty(),
rx_buffer: TakeCell::empty(),
client: OptionalCell::empty(),
}
}
pub fn init(&self) {
self.registers.ctrl.write(
CTRL::CPOL::CLEAR +
CTRL::CPHA::CLEAR +
CTRL::CSBSU::CLEAR +
CTRL::CSBHLD::CLEAR +
CTRL::IDIV.val(2) +
CTRL::CSBPOL::CLEAR +
CTRL::TXBITOR::SET +
CTRL::TXBYTOR::CLEAR +
CTRL::RXBITOR::SET +
CTRL::RXBYTOR::CLEAR +
CTRL::ENPASSTHRU::CLEAR);
self.registers.xact.write(
XACT::START::CLEAR +
XACT::BCNT.val(7) +
XACT::SIZE.val(0) +
XACT::RDY_POLL::CLEAR +
XACT::RDY_POLL_DLY.val(0));
}
fn enable_tx_interrupt(&self) {
self.registers.ictrl.modify(ICTRL::TXDONE::SET);
}
fn disable_tx_interrupt(&self) {
self.registers.ictrl.modify(ICTRL::TXDONE::CLEAR);
}
pub fn handle_interrupt(&self) {
//debug!("SpiHostHardware::handle_interrupt: ISTATE = {:08x}", self.registers.istate.get());
if self.registers.istate.is_set(ISTATE::TXDONE) {
self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
self.client.map(|client| {
self.tx_buffer.take()
.map(|tx_buf| {
self.rx_buffer
.map(|rx_buf| {
self.read_data(rx_buf);
});
client.read_write_done(
tx_buf,
self.rx_buffer.take(),
self.transaction_len.get())
});
});
}
self.disable_tx_interrupt();
}
fn start_transaction(
&self,
write_buffer: Option<&'static mut [u8]>,
read_buffer: Option<&'static mut [u8]>,
transaction_len: usize) -> ReturnCode {
//debug!("SpiHostHardware::start_transaction: transaction_len={}", transaction_len);
// The transaction needs at least one byte.
// It also cannot have more bytes than tx_fifo or rx_fifo is long.
if (transaction_len == 0) ||
(transaction_len > self.registers.tx_fifo.len()) ||
(transaction_len > self.registers.rx_fifo.len()) {
//debug!("SpiHostHardware::start_transaction: Invalid transaction_len={}", transaction_len);
return ReturnCode::ESIZE;
}
self.registers.xact.modify(XACT::BCNT.val(7));
self.registers.xact.modify(XACT::SIZE.val((transaction_len - 1) as u32));
let mut tx_buf_len = 0;
write_buffer.as_ref().map(|tx_buf| {
tx_buf_len = min(tx_buf.len(), transaction_len);
for idx in 0..tx_buf_len {
self.registers.tx_fifo[idx].set(tx_buf[idx]);
}
});
// Clear the TX FIFO for additional bytes not supplied by write_buffer.
// Since we have no control over how many bytes the SPI host reads, we
// want to make sure to not accidentally leak information that made it
// into the TX FIFO beyond the length of the `write_buffer`.
for idx in tx_buf_len..transaction_len {
self.registers.tx_fifo[idx].set(0xff);
}
write_buffer.map(|buf| {
self.tx_buffer.replace(buf);
});
read_buffer.map(|buf| {
self.rx_buffer.replace(buf);
});
self.transaction_len.set(transaction_len);
self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
self.enable_tx_interrupt();
self.registers.xact.modify(XACT::START::SET);
ReturnCode::SUCCESS
}
fn read_data(&self, read_buffer: &mut [u8]) {
let read_len = min(read_buffer.len(), self.transaction_len.get());
for idx in 0..read_len {
let val = self.registers.rx_fifo[idx].get();
read_buffer[idx] = val;
}
}
}
impl SpiHost for SpiHostHardware {
fn spi_device_spi_host_passthrough(&self, enabled: bool) {
self.registers.ctrl.modify(
if enabled { CTRL::ENPASSTHRU::SET } else { CTRL::ENPASSTHRU::CLEAR });
}
fn wait_busy_clear_in_transactions(&self, enabled: bool) {
self.registers.xact.modify(
if enabled { XACT::RDY_POLL::SET } else { XACT::RDY_POLL::CLEAR });
}
}
impl SpiMaster for SpiHostHardware {
type ChipSelect = bool;
fn set_client(&self, client: &'static dyn kernel::hil::spi::SpiMasterClient) {
self.client.set(client);
}
fn init(&self) {}
fn is_busy(&self) -> bool {
self.registers.istate.is_set(ISTATE::TXDONE)
}
fn read_write_bytes(
&self,
write_buffer: &'static mut [u8],
read_buffer: Option<&'static mut [u8]>,
len: usize,
) -> ReturnCode {
// If busy, don't start
if self.is_busy() {
return ReturnCode::EBUSY;
}
self.start_transaction(Some(write_buffer), read_buffer, len)
}
fn write_byte(&self, _val: u8) {
panic!("write_byte is not implemented");
}
fn read_byte(&self) -> u8 {
panic!("read_byte is not implemented");
}
fn read_write_byte(&self, _val: u8) -> u8 {
panic!("read_write_byte is not implemented");
}
fn specify_chip_select(&self, _cs: Self::ChipSelect) {
// Nothing to be done
}
/// Returns the actual rate set
fn set_rate(&self, _rate: u32) -> u32 {
panic!("set_rate is not implemented");
}
fn get_rate(&self) -> u32 {
panic!("get_rate is not implemented");
}
fn set_clock(&self, _polarity: ClockPolarity) {
panic!("set_clock is not implemented");
}
fn get_clock(&self) -> ClockPolarity {
panic!("get_clock is not implemented");
}
fn | (&self, _phase: ClockPhase) {
panic!("set_phase is not implemented");
}
fn get_phase(&self) -> ClockPhase {
panic!("get_phase is not implemented");
}
fn hold_low(&self) {
panic!("hold_low is not implemented");
}
fn release_low(&self) {
// Nothing to do, since this is the only mode supported.
}
}
| set_phase | identifier_name |
spi_host.rs | use crate::hil::spi_host::SpiHost;
use core::cell::Cell;
use core::cmp::min;
use kernel::common::cells::{OptionalCell, TakeCell};
use kernel::common::registers::{register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly};
use kernel::common::StaticRef;
use kernel::hil::spi::{ClockPolarity, ClockPhase, SpiMaster, SpiMasterClient};
use kernel::ReturnCode;
// The TX and RX FIFOs both have the same length. We write and read at the same
// time.
// Registers for the SPI host controller
register_structs! {
Registers {
(0x0000 => ctrl: ReadWrite<u32, CTRL::Register>),
(0x0004 => xact: ReadWrite<u32, XACT::Register>),
(0x0008 => ictrl: ReadWrite<u32, ICTRL::Register>),
(0x000c => istate: ReadOnly<u32, ISTATE::Register>),
(0x0010 => istate_clr: ReadWrite<u32, ISTATE_CLR::Register>),
(0x0014 => _reserved),
(0x1000 => tx_fifo: [WriteOnly<u8>; 128]),
(0x1080 => rx_fifo: [ReadOnly<u8>; 128]),
(0x1100 => @END),
}
}
register_bitfields![u32,
CTRL [
/// CPOL setting
CPOL OFFSET(0) NUMBITS(1) [],
/// CPHA setting
CPHA OFFSET(1) NUMBITS(1) [],
/// CSB to SCK setup time in SCK cycles + 1.5
CSBSU OFFSET(2) NUMBITS(4) [],
/// CSB from SCK hold time in SCK cycles + 1 (defined with respect to
/// the last SCK edge)
CSBHLD OFFSET(6) NUMBITS(4) [],
/// SPI Clk Divider. Actual divider is IDIV+1. A value of 0 gives divide
/// by 1 clock, 1 gives divide by 2 etc.
IDIV OFFSET(10) NUMBITS(12) [],
/// Polarity of CSB signal. 0:active low 1:active high
CSBPOL OFFSET(22) NUMBITS(1) [],
/// Order in which bits of byte are sent. 0: send bit 0 first. 1: send
/// bit 7 first
TXBITOR OFFSET(23) NUMBITS(1) [],
/// Order in which bytes of buffer word are sent.
/// 0: send byte 0 first. 1: send byte 3 first
TXBYTOR OFFSET(24) NUMBITS(1) [],
/// Order in which received bits are packed into byte.
/// 0: first bit received is bit0 1: last bit received is bit 0
RXBITOR OFFSET(25) NUMBITS(1) [],
/// Order in which received bytes are packed into word.
/// 0: first byte received is byte 0 1: first byte received is byte 3
RXBYTOR OFFSET(26) NUMBITS(1) [],
/// SPI Passthrough Mode. 0: Disable, 1: Enable. This is the host side
/// control of whether passthrough is allowed. In order for full
/// passthrough functionality, both the host and device passthrough
/// functionality have to be enabled
ENPASSTHRU OFFSET(27) NUMBITS(1) []
],
XACT [
/// Initiate transaction in buffer
START OFFSET(0) NUMBITS(1) [],
/// Bits-1 in last byte transferred. The default assumes last byte will
/// have 8 bits, this should be sufficient for most usage.
BCNT OFFSET(1) NUMBITS(3) [],
/// Total number of transactions in bytes-1. If 64 bytes are to be
/// transferred, this should be programmed as 63.
SIZE OFFSET(4) NUMBITS(7) [],
/// Poll for ready
RDY_POLL OFFSET(11) NUMBITS(1) [],
/// Delay before polling in PCLK cycles + 1
RDY_POLL_DLY OFFSET(12) NUMBITS(5) []
],
ICTRL [
/// TX interrupt enable
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE [
/// TX done interrupt
TXDONE OFFSET(0) NUMBITS(1) []
],
ISTATE_CLR [
/// TX done interrupt clear
TXDONE OFFSET(0) NUMBITS(1) []
]
];
// MMIO base addresses of the two SPI host controller instances.
const SPI_HOST0_BASE_ADDR: u32 = 0x4070_0000;
const SPI_HOST1_BASE_ADDR: u32 = 0x4071_0000;
// SAFETY: these addresses point at the memory-mapped SPI host register
// blocks, which are valid for the lifetime of the program and are only
// accessed through volatile register operations.
const SPI_HOST0_REGISTERS: StaticRef<Registers> =
    unsafe { StaticRef::new(SPI_HOST0_BASE_ADDR as *const Registers) };
const SPI_HOST1_REGISTERS: StaticRef<Registers> =
    unsafe { StaticRef::new(SPI_HOST1_BASE_ADDR as *const Registers) };
// Singleton driver instances, one per SPI host peripheral.
pub static mut SPI_HOST0: SpiHostHardware = SpiHostHardware::new(SPI_HOST0_REGISTERS);
pub static mut SPI_HOST1: SpiHostHardware = SpiHostHardware::new(SPI_HOST1_REGISTERS);
/// A SPI Host
pub struct SpiHostHardware {
    // Memory-mapped register block for this SPI host instance.
    registers: StaticRef<Registers>,
    // Byte length of the transaction currently in flight; reported back to
    // the client when the transfer completes.
    transaction_len: Cell<usize>,
    // Client-supplied transmit buffer, held for the duration of a transaction.
    tx_buffer: TakeCell<'static, [u8]>,
    // Optional client-supplied receive buffer, filled from the RX FIFO on
    // completion.
    rx_buffer: TakeCell<'static, [u8]>,
    // Callback target notified when a read/write transaction finishes.
    client: OptionalCell<&'static dyn SpiMasterClient>,
}
impl SpiHostHardware {
    /// Construct a driver instance over the given SPI host register block.
    const fn new(base_addr: StaticRef<Registers>) -> SpiHostHardware {
        SpiHostHardware {
            registers: base_addr,
            transaction_len: Cell::new(0),
            tx_buffer: TakeCell::empty(),
            rx_buffer: TakeCell::empty(),
            client: OptionalCell::empty(),
        }
    }
    /// Program a known-good default configuration:
    /// SPI mode 0 (CPOL=0, CPHA=0), no CSB setup/hold extension, clock
    /// divide-by-3 (IDIV=2; the actual divider is IDIV+1), active-low chip
    /// select, bit 7 first in both directions, byte 0 first, passthrough
    /// disabled. The transaction register is reset to an idle state with
    /// 8 bits in the last byte.
    pub fn init(&self) {
        self.registers.ctrl.write(
            CTRL::CPOL::CLEAR +
            CTRL::CPHA::CLEAR +
            CTRL::CSBSU::CLEAR +
            CTRL::CSBHLD::CLEAR +
            CTRL::IDIV.val(2) +
            CTRL::CSBPOL::CLEAR +
            CTRL::TXBITOR::SET +
            CTRL::TXBYTOR::CLEAR +
            CTRL::RXBITOR::SET +
            CTRL::RXBYTOR::CLEAR +
            CTRL::ENPASSTHRU::CLEAR);
        self.registers.xact.write(
            XACT::START::CLEAR +
            XACT::BCNT.val(7) +
            XACT::SIZE.val(0) +
            XACT::RDY_POLL::CLEAR +
            XACT::RDY_POLL_DLY.val(0));
    }
    /// Unmask the transaction-done (TXDONE) interrupt.
    fn enable_tx_interrupt(&self) {
        self.registers.ictrl.modify(ICTRL::TXDONE::SET);
    }
    /// Mask the transaction-done (TXDONE) interrupt.
    fn disable_tx_interrupt(&self) {
        self.registers.ictrl.modify(ICTRL::TXDONE::CLEAR);
    }
    /// Service a SPI host interrupt. On TXDONE: acknowledge the flag, drain
    /// the RX FIFO into the client's read buffer (if one was supplied), and
    /// hand both buffers back through the client's `read_write_done` callback.
    pub fn handle_interrupt(&self) {
        //debug!("SpiHostHardware::handle_interrupt: ISTATE = {:08x}", self.registers.istate.get());
        if self.registers.istate.is_set(ISTATE::TXDONE) {
            // Acknowledge the interrupt before delivering the callback.
            self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
            self.client.map(|client| {
                self.tx_buffer.take()
                    .map(|tx_buf| {
                        // Copy received bytes out of the hardware FIFO before
                        // returning the rx buffer to the client.
                        self.rx_buffer
                            .map(|rx_buf| {
                                self.read_data(rx_buf);
                            });
                        client.read_write_done(
                            tx_buf,
                            self.rx_buffer.take(),
                            self.transaction_len.get())
                    });
            });
        }
        // One-shot interrupt: re-enabled by the next start_transaction.
        self.disable_tx_interrupt();
    }
    /// Start a SPI transaction of `transaction_len` bytes.
    ///
    /// Returns `ESIZE` if the length is zero or exceeds either FIFO's
    /// capacity; `SUCCESS` once the transfer has been kicked off.
    /// Completion is reported asynchronously via [Self::handle_interrupt].
    fn start_transaction(
        &self,
        write_buffer: Option<&'static mut [u8]>,
        read_buffer: Option<&'static mut [u8]>,
        transaction_len: usize) -> ReturnCode {
        //debug!("SpiHostHardware::start_transaction: transaction_len={}", transaction_len);
        // The transaction needs at least one byte.
        // It also cannot have more bytes than tx_fifo or rx_fifo is long.
        if (transaction_len == 0) ||
            (transaction_len > self.registers.tx_fifo.len()) ||
            (transaction_len > self.registers.rx_fifo.len()) {
            //debug!("SpiHostHardware::start_transaction: Invalid transaction_len={}", transaction_len);
            return ReturnCode::ESIZE;
        }
        // 8 bits in the last byte (BCNT is bits-1); SIZE is total bytes - 1.
        self.registers.xact.modify(XACT::BCNT.val(7));
        self.registers.xact.modify(XACT::SIZE.val((transaction_len - 1) as u32));
        let mut tx_buf_len = 0;
        write_buffer.as_ref().map(|tx_buf| {
            tx_buf_len = min(tx_buf.len(), transaction_len);
            for idx in 0..tx_buf_len {
                self.registers.tx_fifo[idx].set(tx_buf[idx]);
            }
        });
        // Clear the TX FIFO for additional bytes not supplied by write_buffer.
        // Since we have no control over how many bytes the SPI host reads, we
        // want to make sure to not accidentally leak information that made it
        // into the TX FIFO beyond the length of the `write_buffer`.
        for idx in tx_buf_len..transaction_len {
            self.registers.tx_fifo[idx].set(0xff);
        }
        write_buffer.map(|buf| {
            self.tx_buffer.replace(buf);
        });
        read_buffer.map(|buf| {
            self.rx_buffer.replace(buf);
        });
        self.transaction_len.set(transaction_len);
        // Clear any stale done flag, then arm the interrupt and start.
        self.registers.istate_clr.write(ISTATE_CLR::TXDONE::SET);
        self.enable_tx_interrupt();
        self.registers.xact.modify(XACT::START::SET);
        ReturnCode::SUCCESS
    }
    /// Copy up to `transaction_len` received bytes out of the RX FIFO into
    /// `read_buffer` (bounded by the buffer's own length).
    fn read_data(&self, read_buffer: &mut [u8]) {
        let read_len = min(read_buffer.len(), self.transaction_len.get());
        for idx in 0..read_len {
            let val = self.registers.rx_fifo[idx].get();
            read_buffer[idx] = val;
        }
    }
}
impl SpiHost for SpiHostHardware {
fn spi_device_spi_host_passthrough(&self, enabled: bool) {
self.registers.ctrl.modify(
if enabled { CTRL::ENPASSTHRU::SET } else | );
}
fn wait_busy_clear_in_transactions(&self, enabled: bool) {
self.registers.xact.modify(
if enabled { XACT::RDY_POLL::SET } else { XACT::RDY_POLL::CLEAR });
}
}
impl SpiMaster for SpiHostHardware {
    type ChipSelect = bool;
    fn set_client(&self, client: &'static dyn kernel::hil::spi::SpiMasterClient) {
        self.client.set(client);
    }
    /// Hardware setup is done by [SpiHostHardware::init]; nothing to do here.
    fn init(&self) {}
    /// Reports busy while a completed transaction's TXDONE flag is still
    /// pending (i.e. before its interrupt has been serviced).
    ///
    /// NOTE(review): this checks the *done* flag, not an in-progress status
    /// bit, so a transfer that is mid-flight with TXDONE clear would read as
    /// not busy — confirm this gating is the intended guard for
    /// read_write_bytes.
    fn is_busy(&self) -> bool {
        self.registers.istate.is_set(ISTATE::TXDONE)
    }
    /// Start a combined write/read transaction of `len` bytes.
    ///
    /// Returns EBUSY if a previous transaction has not been fully serviced,
    /// or ESIZE (from start_transaction) for an invalid length.
    fn read_write_bytes(
        &self,
        write_buffer: &'static mut [u8],
        read_buffer: Option<&'static mut [u8]>,
        len: usize,
    ) -> ReturnCode {
        // If busy, don't start
        if self.is_busy() {
            return ReturnCode::EBUSY;
        }
        self.start_transaction(Some(write_buffer), read_buffer, len)
    }
    fn write_byte(&self, _val: u8) {
        panic!("write_byte is not implemented");
    }
    fn read_byte(&self) -> u8 {
        panic!("read_byte is not implemented");
    }
    fn read_write_byte(&self, _val: u8) -> u8 {
        panic!("read_write_byte is not implemented");
    }
    fn specify_chip_select(&self, _cs: Self::ChipSelect) {
        // Nothing to be done
    }
    /// Returns the actual rate set
    fn set_rate(&self, _rate: u32) -> u32 {
        panic!("set_rate is not implemented");
    }
    fn get_rate(&self) -> u32 {
        panic!("get_rate is not implemented");
    }
    fn set_clock(&self, _polarity: ClockPolarity) {
        panic!("set_clock is not implemented");
    }
    fn get_clock(&self) -> ClockPolarity {
        panic!("get_clock is not implemented");
    }
    fn set_phase(&self, _phase: ClockPhase) {
        panic!("set_phase is not implemented");
    }
    fn get_phase(&self) -> ClockPhase {
        panic!("get_phase is not implemented");
    }
    fn hold_low(&self) {
        panic!("hold_low is not implemented");
    }
    fn release_low(&self) {
        // Nothing to do, since this is the only mode supported.
    }
}
| { CTRL::ENPASSTHRU::CLEAR } | conditional_block |
adc.rs | MCU.
//!
//! ## Clocking
//!
//! The ADC requires a clock signal (ADCK), which is generated from the bus
//! clock, the bus clock divided by 2, the output of the OSC peripheral
//! (OSC_OUT), or an internal asynchronous clock, which, when selected,
//! operates in wait and stop modes. With any of these clock sources a
//! multi-value divider is provided to further divide the incoming clock by 1
//! (i.e. 1:1), 2, 4, or 8.
//!
//! The clock frequency must fall within 400kHz to 8MHz (4MHz in low power
//! mode), This is the same for all KEA MCUs. Ideally, the HAL will only
//! present valid options, but that is not yet implemented (pending clocks
//! improvements to output frequencies). For now you are trusted to input the
//! correct frequency.
//!
//! *Note:* When using the FIFO mode with FIFO scan mode disabled, the bus
//! clock must be faster than half the ADC clock (ADCK). Bus clock >= ADCK / 2.
//!
//! ## Pin Control
//!
//! This functionality is implemented in the GPIO module. See [Analog]
//! for details.
//!
//! ## Conversion Width
//!
//! The ADC can be run in 8, 10, or 12 bit modes. These modes are enumerated in
//! [AdcResolution].
//!
//! ## Hardware Trigger
//!
//! The ADC conversions can be started by a hardware trigger. This is not
//! implemented in all KEA chips, so implementation here will be Delayed. Use
//! the PAC. Enable is ADC_SC2\[ADTRG\] = 1, and trigger is the ADHWT source.
//!
//! ## Usage
//!
//! ### AdcConfig struct
//!
//! [AdcConfig] offers public fields to allow for creation in-place. The
//! [AdcConfig::calculate_divisor] method allows the user to specify the
//! desired Adc Clock frequency (given the clock source frequency). The clock
//! divider which gets the closest to that frequency is chosen.
//!
//! The AdcConfig structure also implements the [Default] trait.
//!
//! ```rust
//! let config: AdcConfig = Default::default();
//!
//! config.calculate_divisor(20_u32.MHz(), 2_u32.MHz());
//! assert!(matches!(config.clock_divisor, ClockDivisor::_8));
//! ```
use crate::hal::adc::{Channel, OneShot};
use crate::{pac::ADC, HALExt};
use core::{convert::Infallible, marker::PhantomData};
use embedded_time::rate::*;
/// Error Enumeration for this module
#[derive(Debug)]
pub enum Error {
    /// The Channel has already been moved out of [OnChipChannels] and has
    /// not yet been returned.
    Moved,
}
/// Analog type state for a GPIO pin.
///
/// This mode "gives" the pin to the ADC hardware peripheral.
/// The ADC Peripheral can take the GPIO pins in any state. The Peripheral will
/// reconfigure the pin to turn off any output drivers, disable input buffers
/// (reading the pin after configuring as analog will return a zero), and
/// disable the pullup. Electrically, an Analog pin that is not currently under
/// conversion is effectively HighImpedence.
///
/// Once a pin is released from the ADC, it will return to its previous state.
/// The previous state includes output enabled, input enabled, pullup enabled,
/// and level (for outputs). Note to accomplish this the pin implements the
/// outof_analog method, which is semantically different from the other type
/// states.
///
/// For example, [crate::gpio::gpioa::PTA0] is configured to be a Output that is set high is
/// converted into the analog mode with the [crate::gpio::gpioa::PTA0::into_analog] method.
/// Once measurements from that pin are completed it will be returned to an
/// Output that is set high by calling the [Analog::outof_analog] method.
///
/// ```rust
/// let pta0 = gpioa.pta0.into_push_pull_output();
/// pta0.set_high();
/// let mut pta0 = pta0.into_analog(); // pta0 is hi-Z
/// let value = adc.read(&mut pta0).unwrap_or(0);
/// let pta0 = pta0.outof_analog(); // pta0 is push-pull output, set high.
/// ```
///
/// Note: This is a hardware feature that requires effectively no clock cycles
/// to complete. "Manually" reconfiguring the pins to HighImpedence before
/// calling into_analog() is discouraged, but it would not hurt anything.
pub struct Analog<Pin> {
    // The wrapped GPIO pin, held so its previous configuration can be
    // restored by outof_analog().
    pin: Pin,
}
/// Interface for ADC Peripheral.
///
/// Returned by calling [HALExt::split] on the pac [ADC] structure. Holds state
/// of peripheral.
pub struct Adc<State> {
    // Owned PAC-level ADC register block.
    peripheral: ADC,
    // Zero-sized marker tracking the Enabled/Disabled type state.
    _state: PhantomData<State>,
    /// Contains the On-Chip ADC Channels, like the MCU's temperature sensor.
    pub onchip_channels: OnChipChannels,
}
impl HALExt for ADC {
    type T = Adc<Disabled>;
    /// Consume the PAC ADC peripheral and produce the HAL wrapper in the
    /// [Disabled] state, populating each on-chip channel slot exactly once.
    fn split(self) -> Adc<Disabled> {
        Adc {
            peripheral: self,
            _state: PhantomData,
            onchip_channels: OnChipChannels {
                vss: Some(Analog {
                    pin: Vss::<Input> { _mode: PhantomData },
                }),
                temp_sense: Some(Analog {
                    pin: TempSense::<Input> { _mode: PhantomData },
                }),
                bandgap: Some(Analog {
                    pin: Bandgap::<Input> { _mode: PhantomData },
                }),
                vref_h: Some(Analog {
                    pin: VrefH::<Input> { _mode: PhantomData },
                }),
                vref_l: Some(Analog {
                    pin: VrefL::<Input> { _mode: PhantomData },
                }),
            },
        }
    }
}
/// Configuration struct for Adc peripheral.
pub struct AdcConfig {
    /// Determines the clock source for the ADC peripheral
    ///
    /// Default is [AdcClocks::Bus]
    pub clock_source: AdcClocks,
    /// Divides the clock source to get the ADC clock into its usable range of
    /// 400kHz - 8MHz (4MHz in low power mode).
    ///
    /// Default is [ClockDivisor::_1] (no division)
    pub clock_divisor: ClockDivisor,
    /// Set the resolution of ADC conversion
    ///
    /// Default is [AdcResolution::_12bit] (see the [Default] impl)
    pub resolution: AdcResolution,
    /// Set ADC sample time.
    ///
    /// Default is [AdcSampleTime::Short]
    pub sample_time: AdcSampleTime,
    /// Set low power mode
    ///
    /// Default is false.
    pub low_power: bool,
}
impl AdcConfig {
    /// Calculate and store the ADC clock divisor.
    ///
    /// Uses the current clock source and the supplied source frequency to
    /// pick the power-of-two divisor that brings the ADC clock (ADCK)
    /// closest to `req_adc_freq`.
    ///
    /// Note: This relies on trustworthy values for `source_freq` and valid
    /// values for `req_adc_freq`. In the future this should know or
    /// determine what the current clock frequency is instead of relying
    /// on the user to provide it.
    ///
    /// # Panics
    ///
    /// Panics if the resulting ADC clock falls outside the legal range
    /// (400 kHz to 8 MHz, or 4 MHz in low power mode).
    pub fn calculate_divisor(&mut self, source_freq: Hertz, req_adc_freq: Hertz) {
        // Ideal integer ratio between source clock and requested ADC clock.
        // Computed in i16 so the error terms below cannot underflow: the
        // previous u8 arithmetic (`denom - output`) panicked in debug builds
        // whenever the trial divisor exceeded the ideal ratio. Clamping to
        // 255 preserves the old u8 value range without silent truncation.
        let denom: i16 = (source_freq.integer() / req_adc_freq.integer()).min(255) as i16;
        let mut output: u8 = 1;
        let mut err_old: i16 = (denom - output as i16).abs();
        // Only the Bus clock source supports the /16 divisor.
        let max_divisor = match self.clock_source {
            AdcClocks::Bus => 16,
            _ => 8,
        };
        // Keep doubling the divisor while doing so reduces the error between
        // the achieved and requested ADC clock rates.
        while output < max_divisor {
            let err = (denom - (output << 1) as i16).abs();
            if err <= err_old {
                output <<= 1;
                err_old = err;
            } else {
                break;
            }
        }
        // I am of the mind that this assert is okay, at least until the input
        // clock can be known at compile time.
        let ad_clock = source_freq.integer() / output as u32;
        assert!(400_000 <= ad_clock);
        assert!(
            ad_clock
                <= match self.low_power {
                    false => 8_000_000,
                    true => 4_000_000,
                }
        );
        self.clock_divisor = match output {
            1 => ClockDivisor::_1,
            2 => ClockDivisor::_2,
            4 => ClockDivisor::_4,
            8 => ClockDivisor::_8,
            _ => ClockDivisor::_16,
        }
    }
    /// Set the divisor directly. Panics if the divisor isn't supported by
    /// the current clock source (/16 is only legal with the Bus clock).
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_divisor(&mut self, divisor: ClockDivisor) {
        // divisor can't be 16 unless using the Bus clock
        assert!(
            !(!matches!(self.clock_source, AdcClocks::Bus) && matches!(divisor, ClockDivisor::_16))
        );
        self.clock_divisor = divisor;
    }
    /// Sets the clock source. Panics if the currently-configured divisor is
    /// not supported by the new source (/16 is only legal with the Bus
    /// clock).
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_clock_source(&mut self, clock: AdcClocks) {
        // Panic if setting the clock to anything other than Bus while the
        // divisor is set to 16. The previous assert was missing the outer
        // negation (compare set_divisor above), so it panicked on every
        // *valid* configuration instead of the invalid one.
        assert!(
            !(!matches!(clock, AdcClocks::Bus) && matches!(self.clock_divisor, ClockDivisor::_16))
        );
        self.clock_source = clock;
    }
}
impl Default for AdcConfig {
    /// Bus clock source, no division, 12-bit resolution, short sample time,
    /// normal (non-low-power) mode.
    fn default() -> AdcConfig {
        AdcConfig {
            clock_source: AdcClocks::Bus,
            clock_divisor: ClockDivisor::_1,
            resolution: AdcResolution::_12bit,
            sample_time: AdcSampleTime::Short,
            low_power: false,
        }
    }
}
/// Clock types available to the Adc peripheral
///
/// Dividers will be chosen appropriately to suit requested clock rate.
pub enum AdcClocks {
    /// Use the incoming Bus Clock
    Bus,
    /// Use the output of the OSC peripheral (OSC_OUT)
    External,
    /// Available in Wait AND Stop Mode
    Async,
}
/// This enum represents the available ADC resolutions
///
/// Regardless of resolution chosen, results are always right justified
#[repr(u8)]
pub enum AdcResolution {
    /// 8 bit AD conversion mode
    _8bit = 0,
    /// 10 bit AD conversion mode
    _10bit = 1,
    /// 12 bit AD conversion mode
    _12bit = 2,
}
/// Adc sample time
pub enum AdcSampleTime {
    /// Sample for 3.5 ADC clock (ADCK) cycles.
    Short = 0,
    /// Sample for 23.5 ADC clock (ADCK) cycles.
    ///
    /// Required for high impedance (>2k @ADCK > 4MHz, >5k @ ADCK < 4MHz)
    /// inputs.
    Long = 1,
}
/// Adc Clock Divisors
///
/// Note 1/16 divisor is only usable for the Bus clock
pub enum ClockDivisor {
    /// Source / 1, No division
    _1 = 0,
    /// Source / 2
    _2 = 1,
    /// Source / 4
    _4 = 2,
    /// Source / 8
    _8 = 3,
    /// Source / 16
    _16 = 4,
}
/// Enabled state (type-state marker): the ADC's bus clock is gated on and
/// its registers are accessible.
pub struct Enabled;
/// Disabled state (type-state marker): register access would hard fault
/// (see [Adc::enable]).
pub struct Disabled;
impl Adc<Enabled> {
    /// Poll to determine if ADC conversion is complete.
    ///
    /// Note: This flag is cleared when the sampling mode is changed,
    /// interrupts are enabled, [Adc::set_channel] is called, and when [Adc::result] is
    /// called (including [Adc::try_result])
    pub fn is_done(&self) -> bool {
        self.peripheral.sc1.read().coco().bit()
    }
    /// Poll to determine if ADC conversion is underway
    pub fn is_converting(&self) -> bool {
        self.peripheral.sc2.read().adact().bit()
    }
    /// Grab the last ADC conversion result.
    pub fn result(&self) -> u16 {
        self.peripheral.r.read().adr().bits()
    }
    /// Poll for conversion completion, if done return the result.
    pub fn try_result(&self) -> Option<u16> {
        if self.is_done() {
            Some(self.result())
        } else {
            None
        }
    }
    /// Set ADC target channel.
    ///
    /// In Single conversion mode (OneShot), setting the channel begins the conversion. In FIFO mode
    /// the channel is added to the FIFO buffer.
    ///
    /// Note: If the channel is changed while a conversion is in progress the
    /// current conversion will be cancelled. If in FIFO mode, conversion will
    /// resume once the FIFO channels are refilled.
    pub fn set_channel<T: Channel<Adc<Enabled>, ID = u8>>(&self, _pin: &T) {
        self.peripheral
            .sc1
            .modify(|_, w| unsafe { w.adch().bits(T::channel()) });
    }
    /// Set the ADC's configuration
    ///
    /// Writes the clock source/divisor, resolution, sample time, and low
    /// power flag into SC3 in a single modify.
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.peripheral.sc3.modify(|_, w| {
            use pac::adc::sc3::{ADICLK_A, ADIV_A, ADLSMP_A, MODE_A};
            w.adiclk()
                .variant(match config.clock_source {
                    AdcClocks::Bus =>
                    // If divisor is 16, use the Bus / 2 clock source, else use
                    // the 1:1 Bus clock source
                    {
                        match config.clock_divisor {
                            ClockDivisor::_16 => ADICLK_A::_01,
                            _ => ADICLK_A::_00,
                        }
                    }
                    AdcClocks::External => ADICLK_A::_10,
                    AdcClocks::Async => ADICLK_A::_11,
                })
                .mode()
                .variant(match config.resolution {
                    AdcResolution::_8bit => MODE_A::_00,
                    AdcResolution::_10bit => MODE_A::_01,
                    AdcResolution::_12bit => MODE_A::_10,
                })
                .adlsmp()
                .variant(match config.sample_time {
                    AdcSampleTime::Short => ADLSMP_A::_0,
                    AdcSampleTime::Long => ADLSMP_A::_1,
                })
                .adiv()
                // /16 is realized as the Bus/2 source (above) times /8 here.
                .variant(match config.clock_divisor {
                    ClockDivisor::_1 => ADIV_A::_00,
                    ClockDivisor::_2 => ADIV_A::_01,
                    ClockDivisor::_4 => ADIV_A::_10,
                    _ => ADIV_A::_11,
                })
                .adlpc()
                .bit(config.low_power)
        });
        // It looks like SCGC has to be set before touching the peripheral
        // at all, else hardfault. Go back later to confirm that if using external clock
        // scgc can be cleared.
        // w.adc().variant(match config.clock_source {
        //     AdcClocks::Bus => ADC_A::_1,
        //     _ => ADC_A::_0,
        // })
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
}
impl Adc<Disabled> {
    /// Connects the bus clock to the adc via the SIM peripheral, allowing
    /// read and write access to ADC registers.
    ///
    /// Any attempt to access ADC registers while disabled results in a
    /// HardFault, generated by hardware.
    ///
    /// This also enables the bandgap voltage reference.
    pub fn enable(self) -> Adc<Enabled> {
        // Critical section: the SIM and PMC registers touched here are
        // shared with other drivers.
        cortex_m::interrupt::free(|_| {
            unsafe { &(*pac::SIM::ptr()) }.scgc.modify(|_, w| {
                use pac::sim::scgc::ADC_A;
                w.adc().variant(ADC_A::_1)
            });
            // Don't start a conversion (set channel to DummyDisable)
            self.peripheral.sc1.modify(|_, w| w.adch()._11111());
            // Bandgap. Grab directly, Currently the bandgap isn't implemented
            // in [system::PMC]. We will eventually have to pass in the pmc
            // peripheral handle as a variable.
            unsafe { &(*pac::PMC::ptr()) }
                .spmsc1
                .modify(|_, w| w.bgbe()._1());
        });
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
    /// Set the ADC's configuration
    ///
    /// This is a sugar method for calling [Adc<Disabled>::enable] followed by
    /// [Adc<Enabled>::configure]
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.enable().configure(config)
    }
}
impl<Mode> Adc<Mode> {
    /// Not Implemented
    ///
    /// # Panics
    ///
    /// Always panics via `unimplemented!`.
    pub fn into_interrupt(self) -> Adc<Mode> {
        unimplemented!("Interrupt is not yet implemented");
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
    }
    /// Not Implemented
    ///
    /// # Panics
    ///
    /// Always panics via `unimplemented!`.
    pub fn into_fifo(self, _depth: u8) -> Adc<Mode> {
        // self.peripheral
        //     .sc4
        //     .modify(|_r, w| w.afdep().bits(depth & 0x7));
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
        unimplemented!("FIFO is not yet implemented");
    }
    /// Not Implemented
    ///
    /// # Panics
    ///
    /// Always panics via `unimplemented!`.
    pub fn into_continuous(self) -> Adc<Mode> {
        unimplemented!("Continuous Conversion mode not yet implemented");
    }
}
impl OnChipChannels {
/// Request an instance of an on-chip [Vss] channel.
pub fn vss(&mut self) -> Result<Analog<Vss<Input>>, Error> {
self.vss.take().ok_or(Error::Moved)
}
/// Return the instance of [Vss]
pub fn return_vss(&mut self, inst: Analog<Vss<Input>>) {
self.vss.replace(inst);
}
/// Try to grab an instance of the onchip [TempSense] channel.
pub fn tempsense(&mut self) -> Result<Analog<TempSense<Input>>, Error> {
self.temp_sense.take().ok_or(Error::Moved)
}
/// Return the instance of [TempSense]
pub fn return_tempsense(&mut self, inst: Analog<TempSense<Input>>) {
self.temp_sense.replace(inst);
}
/// Try to grab an instance of the onchip [Bandgap] channel.
///
/// The bandgap reference is a fixed 1.16V (nom, Factory trimmed to +/-
/// 0.02V at Vdd=5.0 at 125C) signal that is available to the ADC Module.
/// It can be used as a voltage reference for the ACMP and as an [Analog]
/// channel that can be used to (roughly) check the VDD voltage
pub fn bandgap(&mut self) -> Result<Analog<Bandgap<Input>>, Error> {
self.bandgap.take().ok_or(Error::Moved)
}
/// Return the instance of [Bandgap]
pub fn return_bandgap(&mut self, inst: Analog<Bandgap<Input>>) {
self.bandgap.replace(inst);
}
/// Try to grab an instance of the onchip Voltage Reference High ([VrefH]) channel.
pub fn vref_h(&mut self) -> Result<Analog<VrefH<Input>>, Error> {
self.vref_h.take().ok_or(Error::Moved)
}
/// Return the instance of [VrefH]
pub fn | (&mut self, inst: Analog<VrefH<Input>>) {
self.vref_h.replace(inst);
}
/// Try to grab an instance of the onchip Voltage Reference Low ([VrefL]) channel.
pub fn vref_l(&mut self) -> Result<Analog<VrefL<Input>>, Error> {
self.vref_l.take().ok_or(Error::Moved)
}
/// Return the instance of [VrefL]
pub fn return_vref_l(&mut self, inst: Analog<VrefL<Input>>) {
self.vref_l.replace(inst);
}
/// Grab a [DummyDisable] instance. Multiple Instances possible.
pub fn dummy_disable(&self) -> Analog<DummyDisable<Input>> {
Analog {
pin: DummyDisable::<Input> { _mode: PhantomData },
}
}
}
/// Holds On-Chip ADC Channel inputs and provides an interface to grab and return them.
// These have to have the Input dummy type to allow them to have the Channel
// trait.
pub struct OnChipChannels {
    // Each slot holds Some until the channel is taken via its accessor and
    // is refilled by the corresponding return_* method.
    vss: Option<Analog<Vss<Input>>>,
    temp_sense: Option<Analog<TempSense<Input>>>,
    bandgap: Option<Analog<Bandgap<Input>>>,
    vref_h: Option<Analog<VrefH<Input>>>,
    vref_l: Option<Analog<VrefL<Input>>>,
}
/// Dummy type state for on-chip ADC input channels
pub struct Input;
/// Adc Input Channel, measures ground (should be 0?)
// NOTE(review): the generic parameter below is *named* `Input` and therefore
// shadows the `Input` marker type above (any type argument is accepted) —
// this holds for all of the channel structs in this group.
pub struct Vss<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, measures internal temperature sensor
pub struct TempSense<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Bandgap internal voltage reference
pub struct Bandgap<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, High
pub struct VrefH<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, Low
pub struct VrefL<Input> {
    _mode: PhantomData<Input>,
}
/// Dummy Channel that temporarily disables the Adc Module.
pub struct DummyDisable<Input> {
    _mode: PhantomData<Input>,
}
// Implements `Channel<Adc<Enabled>>` for the `Analog` wrapper of each listed
// pin type, mapping it to its hardware ADC channel number.
macro_rules! adc_input_channels {
    ( $($Chan:expr => $Pin:ident),+ $(,)*) => {
        $(
            impl<OldMode> Channel<Adc<Enabled>> for Analog<$Pin<OldMode>> {
                type ID = u8;
                fn channel() -> u8 { $Chan }
            }
        )+
    };
}
use crate::gpio::{gpioa::*, gpiob::*};
adc_input_channels! (
0_u8 => PTA0,
1_u8 => PTA1,
2_u8 => PTA6,
3_u8 => PTA7,
4_u8 => PTB0,
5_u8 => PTB1,
6_u8 => P | return_vref_h | identifier_name |
adc.rs | the MCU.
//!
//! ## Clocking
//!
//! The ADC requires a clock signal (ADCK), which is generated from the bus
//! clock, the bus clock divided by 2, the output of the OSC peripheral
//! (OSC_OUT), or an internal asynchronous clock, which, when selected,
//! operates in wait and stop modes. With any of these clock sources a
//! multi-value divider is provided to further divide the incoming clock by 1
//! (i.e. 1:1), 2, 4, or 8.
//!
//! The clock frequency must fall within 400kHz to 8MHz (4MHz in low power
//! mode), This is the same for all KEA MCUs. Ideally, the HAL will only
//! present valid options, but that is not yet implemented (pending clocks
//! improvements to output frequencies). For now you are trusted to input the
//! correct frequency.
//!
//! *Note:* When using the FIFO mode with FIFO scan mode disabled, the bus
//! clock must be faster than half the ADC clock (ADCK). Bus clock >= ADCK / 2.
//!
//! ## Pin Control
//!
//! This functionality is implemented in the GPIO module. See [Analog]
//! for details.
//!
//! ## Conversion Width
//!
//! The ADC can be run in 8, 10, or 12 bit modes. These modes are enumerated in
//! [AdcResolution].
//!
//! ## Hardware Trigger
//!
//! The ADC conversions can be started by a hardware trigger. This is not
//! implemented in all KEA chips, so implementation here will be Delayed. Use
//! the PAC. Enable is ADC_SC2\[ADTRG\] = 1, and trigger is the ADHWT source.
//!
//! ## Usage
//!
//! ### AdcConfig struct
//!
//! [AdcConfig] offers public fields to allow for creation in-place. The
//! [AdcConfig::calculate_divisor] method allows the user to specify the
//! desired Adc Clock frequency (given the clock source frequency). The clock
//! divider which gets the closest to that frequency is chosen.
//!
//! The AdcConfig structure also implements the [Default] trait.
//!
//! ```rust
//! let config: AdcConfig = Default::default();
//!
//! config.calculate_divisor(20_u32.MHz(), 2_u32.MHz());
//! assert!(matches!(config.clock_divisor, ClockDivisor::_8));
//! ```
use crate::hal::adc::{Channel, OneShot};
use crate::{pac::ADC, HALExt};
use core::{convert::Infallible, marker::PhantomData};
use embedded_time::rate::*;
/// Error Enumeration for this module
#[derive(Debug)]
pub enum Error {
/// The Channel has already been moved
Moved,
}
/// Analog type state for a GPIO pin.
///
/// This mode "gives" the pin to the ADC hardware peripheral.
/// The ADC Peripheral can take the GPIO pins in any state. The Peripheral will
/// reconfigure the pin to turn off any output drivers, disable input buffers
/// (reading the pin after configuring as analog will return a zero), and
/// disable the pullup. Electrically, an Analog pin that is not currently under
/// conversion is effectively HighImpedence.
///
/// Once a pin is released from the ADC, it will return to its previous state.
/// The previous state includes output enabled, input enabled, pullup enabled,
/// and level (for outputs). Note to accomplish this the pin implements the
/// outof_analog method, which is semantically different from the other type
/// states.
///
/// For example, [crate::gpio::gpioa::PTA0] is configured to be a Output that is set high is
/// converted into the analog mode with the [crate::gpio::gpioa::PTA0::into_analog] method.
/// Once measurements from that pin are completed it will be returned to an
/// Output that is set high by calling the [Analog::outof_analog] method.
///
/// ```rust
/// let pta0 = gpioa.pta0.into_push_pull_output();
/// pta0.set_high();
/// let mut pta0 = pta0.into_analog(); // pta0 is hi-Z
/// let value = adc.read(&mut pta0).unwrap_or(0);
/// let pta0 = pta0.outof_analog(); // pta0 is push-pull output, set high.
/// ```
///
/// Note: This is a hardware feature that requires effectively no clock cycles
/// to complete. "Manually" reconfiguring the pins to HighImpedence before
/// calling into_analog() is discouraged, but it would not hurt anything.
pub struct Analog<Pin> {
pin: Pin,
}
/// Interface for ADC Peripheral.
///
/// Returned by calling [HALExt::split] on the pac [ADC] structure. Holds state
/// of peripheral.
pub struct Adc<State> {
peripheral: ADC,
_state: PhantomData<State>,
/// Contains the On-Chip ADC Channels, like the MCU's temperature sensor.
pub onchip_channels: OnChipChannels,
}
impl HALExt for ADC {
type T = Adc<Disabled>;
fn split(self) -> Adc<Disabled> {
Adc {
peripheral: self,
_state: PhantomData,
onchip_channels: OnChipChannels {
vss: Some(Analog {
pin: Vss::<Input> { _mode: PhantomData },
}),
temp_sense: Some(Analog {
pin: TempSense::<Input> { _mode: PhantomData },
}),
bandgap: Some(Analog {
pin: Bandgap::<Input> { _mode: PhantomData },
}),
vref_h: Some(Analog {
pin: VrefH::<Input> { _mode: PhantomData },
}),
vref_l: Some(Analog {
pin: VrefL::<Input> { _mode: PhantomData },
}),
},
}
}
}
/// Configuration struct for Adc peripheral.
pub struct AdcConfig {
    /// Determines the clock source for the ADC peripheral
    ///
    /// Default is [AdcClocks::Bus]
    pub clock_source: AdcClocks,
    /// Divides the clock source to get the ADC clock into its usable range of
    /// 400kHz - 8MHz (4MHz in low power mode).
    ///
    /// Default is [ClockDivisor::_1] (no division)
    pub clock_divisor: ClockDivisor,
    /// Set the resolution of ADC conversion
    ///
    /// Default is [AdcResolution::_12bit] (see the [Default] impl)
    pub resolution: AdcResolution,
    /// Set ADC sample time.
    ///
    /// Default is [AdcSampleTime::Short]
    pub sample_time: AdcSampleTime,
    /// Set low power mode
    ///
    /// Default is false.
    pub low_power: bool,
}
impl AdcConfig {
    /// Calculate and store the ADC clock divisor.
    ///
    /// Uses the current clock source and the supplied source frequency to
    /// pick the power-of-two divisor that brings the ADC clock (ADCK)
    /// closest to `req_adc_freq`.
    ///
    /// Note: This relies on trustworthy values for `source_freq` and valid
    /// values for `req_adc_freq`. In the future this should know or
    /// determine what the current clock frequency is instead of relying
    /// on the user to provide it.
    ///
    /// # Panics
    ///
    /// Panics if the resulting ADC clock falls outside the legal range
    /// (400 kHz to 8 MHz, or 4 MHz in low power mode).
    pub fn calculate_divisor(&mut self, source_freq: Hertz, req_adc_freq: Hertz) {
        // Ideal integer ratio between source clock and requested ADC clock.
        // Computed in i16 so the error terms below cannot underflow: the
        // previous u8 arithmetic (`denom - output`) panicked in debug builds
        // whenever the trial divisor exceeded the ideal ratio. Clamping to
        // 255 preserves the old u8 value range without silent truncation.
        let denom: i16 = (source_freq.integer() / req_adc_freq.integer()).min(255) as i16;
        let mut output: u8 = 1;
        let mut err_old: i16 = (denom - output as i16).abs();
        // Only the Bus clock source supports the /16 divisor.
        let max_divisor = match self.clock_source {
            AdcClocks::Bus => 16,
            _ => 8,
        };
        // Keep doubling the divisor while doing so reduces the error between
        // the achieved and requested ADC clock rates.
        while output < max_divisor {
            let err = (denom - (output << 1) as i16).abs();
            if err <= err_old {
                output <<= 1;
                err_old = err;
            } else {
                break;
            }
        }
        // I am of the mind that this assert is okay, at least until the input
        // clock can be known at compile time.
        let ad_clock = source_freq.integer() / output as u32;
        assert!(400_000 <= ad_clock);
        assert!(
            ad_clock
                <= match self.low_power {
                    false => 8_000_000,
                    true => 4_000_000,
                }
        );
        self.clock_divisor = match output {
            1 => ClockDivisor::_1,
            2 => ClockDivisor::_2,
            4 => ClockDivisor::_4,
            8 => ClockDivisor::_8,
            _ => ClockDivisor::_16,
        }
    }
    /// Set the divisor directly. Panics if the divisor isn't supported by
    /// the current clock source (/16 is only legal with the Bus clock).
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_divisor(&mut self, divisor: ClockDivisor) {
        // divisor can't be 16 unless using the Bus clock
        assert!(
            !(!matches!(self.clock_source, AdcClocks::Bus) && matches!(divisor, ClockDivisor::_16))
        );
        self.clock_divisor = divisor;
    }
    /// Sets the clock source. Panics if the currently-configured divisor is
    /// not supported by the new source (/16 is only legal with the Bus
    /// clock).
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_clock_source(&mut self, clock: AdcClocks) {
        // Panic if setting the clock to anything other than Bus while the
        // divisor is set to 16. The previous assert was missing the outer
        // negation (compare set_divisor above), so it panicked on every
        // *valid* configuration instead of the invalid one.
        assert!(
            !(!matches!(clock, AdcClocks::Bus) && matches!(self.clock_divisor, ClockDivisor::_16))
        );
        self.clock_source = clock;
    }
}
impl Default for AdcConfig {
    /// Bus clock source, no division, 12-bit resolution, short sample time,
    /// normal (non-low-power) mode.
    fn default() -> AdcConfig {
        AdcConfig {
            clock_source: AdcClocks::Bus,
            clock_divisor: ClockDivisor::_1,
            resolution: AdcResolution::_12bit,
            sample_time: AdcSampleTime::Short,
            low_power: false,
        }
    }
}
/// Clock types available to the Adc peripheral
///
/// Dividers will be chosen appropriately to suit requested clock rate.
pub enum AdcClocks {
    /// Use the incoming Bus Clock
    Bus,
    /// Use the output of the OSC peripheral (OSC_OUT)
    External,
    /// Available in Wait AND Stop Mode
    Async,
}
/// This enum represents the available ADC resolutions
///
/// Regardless of resolution chosen, results are always right justified
#[repr(u8)]
pub enum AdcResolution {
    /// 8 bit AD conversion mode
    _8bit = 0,
    /// 10 bit AD conversion mode
    _10bit = 1,
    /// 12 bit AD conversion mode
    _12bit = 2,
}
/// Adc sample time
pub enum AdcSampleTime {
    /// Sample for 3.5 ADC clock (ADCK) cycles.
    Short = 0,
    /// Sample for 23.5 ADC clock (ADCK) cycles.
    ///
    /// Required for high impedance (>2k @ADCK > 4MHz, >5k @ ADCK < 4MHz)
    /// inputs.
    Long = 1,
}
/// Adc Clock Divisors
///
/// Note 1/16 divisor is only usable for the Bus clock
pub enum ClockDivisor {
    /// Source / 1, No division
    _1 = 0,
    /// Source / 2
    _2 = 1,
    /// Source / 4
    _4 = 2,
    /// Source / 8
    _8 = 3,
    /// Source / 16
    _16 = 4,
}
/// Enabled state (type-state marker): the ADC's bus clock is gated on and
/// its registers are accessible.
pub struct Enabled;
/// Disabled state (type-state marker): register access would hard fault
/// (see [Adc::enable]).
pub struct Disabled;
impl Adc<Enabled> {
    /// Poll to determine if ADC conversion is complete.
    ///
    /// Note: This flag is cleared when the sampling mode is changed,
    /// interrupts are enabled, [Adc::set_channel] is called, and when [Adc::result] is
    /// called (including [Adc::try_result])
    pub fn is_done(&self) -> bool {
        // SC1[COCO]: conversion-complete flag.
        self.peripheral.sc1.read().coco().bit()
    }
    /// Poll to determine if ADC conversion is underway
    pub fn is_converting(&self) -> bool {
        // SC2[ADACT]: conversion-active flag.
        self.peripheral.sc2.read().adact().bit()
    }
    /// Grab the last ADC conversion result.
    ///
    /// Reading the result register also clears the conversion-complete
    /// flag (see [Adc::is_done]).
    pub fn result(&self) -> u16 {
        self.peripheral.r.read().adr().bits()
    }
    /// Poll for conversion completion, if done return the result.
    pub fn try_result(&self) -> Option<u16> {
        if self.is_done() {
            Some(self.result())
        } else {
            None
        }
    }
    /// Set ADC target channel.
    ///
    /// In Single conversion mode (OneShot), setting the channel begins the conversion. In FIFO mode
    /// the channel is added to the FIFO buffer.
    ///
    /// Note: If the channel is changed while a conversion is in progress the
    /// current conversion will be cancelled. If in FIFO mode, conversion will
    /// resume once the FIFO channels are refilled.
    pub fn set_channel<T: Channel<Adc<Enabled>, ID = u8>>(&self, _pin: &T) {
        // The channel number comes from the pin's `Channel` impl, which is
        // generated by the `adc_input_channels!` macro below.
        self.peripheral
            .sc1
            .modify(|_, w| unsafe { w.adch().bits(T::channel()) });
    }
    /// Set the ADC's configuration
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.peripheral.sc3.modify(|_, w| {
            use pac::adc::sc3::{ADICLK_A, ADIV_A, ADLSMP_A, MODE_A};
            // Input clock select (ADICLK).
            w.adiclk()
                .variant(match config.clock_source {
                    AdcClocks::Bus =>
                    // If divisor is 16, use the Bus / 2 clock source, else use
                    // the 1:1 Bus clock source
                    {
                        match config.clock_divisor {
                            ClockDivisor::_16 => ADICLK_A::_01,
                            _ => ADICLK_A::_00,
                        }
                    }
                    AdcClocks::External => ADICLK_A::_10,
                    AdcClocks::Async => ADICLK_A::_11,
                })
                // Conversion resolution (MODE).
                .mode()
                .variant(match config.resolution {
                    AdcResolution::_8bit => MODE_A::_00,
                    AdcResolution::_10bit => MODE_A::_01,
                    AdcResolution::_12bit => MODE_A::_10,
                })
                // Sample window length (ADLSMP).
                .adlsmp()
                .variant(match config.sample_time {
                    AdcSampleTime::Short => ADLSMP_A::_0,
                    AdcSampleTime::Long => ADLSMP_A::_1,
                })
                // Clock divide (ADIV). /16 falls into the `_` arm (ADIV /8)
                // and is combined with the Bus/2 source selected above.
                .adiv()
                .variant(match config.clock_divisor {
                    ClockDivisor::_1 => ADIV_A::_00,
                    ClockDivisor::_2 => ADIV_A::_01,
                    ClockDivisor::_4 => ADIV_A::_10,
                    _ => ADIV_A::_11,
                })
                // Low-power conversion mode (ADLPC).
                .adlpc()
                .bit(config.low_power)
        });
        // It looks like SCGC has to be set before touching the peripheral
        // at all, else hardfault. Go back later to confirm that if using external clock
        // scgc can be cleared.
        // w.adc().variant(match config.clock_source {
        //     AdcClocks::Bus => ADC_A::_1,
        //     _ => ADC_A::_0,
        // })
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
}
impl Adc<Disabled> {
/// Connects the bus clock to the adc via the SIM peripheral, allowing
/// read and write access to ADC registers.
///
/// Any attempt to access ADC registers while disabled results in a
/// HardFault, generated by hardware.
///
/// This also enables the bandgap voltage reference.
pub fn enable(self) -> Adc<Enabled> {
cortex_m::interrupt::free(|_| {
unsafe { &(*pac::SIM::ptr()) }.scgc.modify(|_, w| {
use pac::sim::scgc::ADC_A;
w.adc().variant(ADC_A::_1)
});
|
// Bandgap. Grab directly, Currently the bandgap isn't implemented
// in [system::PMC]. We will eventually have to pass in the pmc
// peripheral handle as a variable.
unsafe { &(*pac::PMC::ptr()) }
.spmsc1
.modify(|_, w| w.bgbe()._1());
});
Adc {
peripheral: self.peripheral,
_state: PhantomData,
onchip_channels: self.onchip_channels,
}
}
/// Set the ADC's configuration
///
/// This is a sugar method for calling [Adc<Disabled>::enable] followed by
/// [Adc<Enabled>::configure]
pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
self.enable().configure(config)
}
}
impl<Mode> Adc<Mode> {
    /// Not Implemented
    ///
    /// Will eventually switch the driver to interrupt-driven operation.
    pub fn into_interrupt(self) -> Adc<Mode> {
        unimplemented!("Interrupt is not yet implemented");
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
    }
    /// Not Implemented
    ///
    /// Will eventually enable the hardware FIFO with the requested depth.
    pub fn into_fifo(self, _depth: u8) -> Adc<Mode> {
        // self.peripheral
        //     .sc4
        //     .modify(|_r, w| w.afdep().bits(depth & 0x7));
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
        unimplemented!("FIFO is not yet implemented");
    }
    /// Not Implemented
    pub fn into_continuous(self) -> Adc<Mode> {
        unimplemented!("Continuous Conversion mode not yet implemented");
    }
}
impl OnChipChannels {
    /// Request an instance of an on-chip [Vss] channel.
    ///
    /// Returns [Error::Moved] if the instance has already been taken.
    pub fn vss(&mut self) -> Result<Analog<Vss<Input>>, Error> {
        self.vss.take().ok_or(Error::Moved)
    }
    /// Return the instance of [Vss]
    pub fn return_vss(&mut self, inst: Analog<Vss<Input>>) {
        self.vss.replace(inst);
    }
    /// Try to grab an instance of the onchip [TempSense] channel.
    pub fn tempsense(&mut self) -> Result<Analog<TempSense<Input>>, Error> {
        self.temp_sense.take().ok_or(Error::Moved)
    }
    /// Return the instance of [TempSense]
    pub fn return_tempsense(&mut self, inst: Analog<TempSense<Input>>) {
        self.temp_sense.replace(inst);
    }
    /// Try to grab an instance of the onchip [Bandgap] channel.
    ///
    /// The bandgap reference is a fixed 1.16V (nom, Factory trimmed to +/-
    /// 0.02V at Vdd=5.0 at 125C) signal that is available to the ADC Module.
    /// It can be used as a voltage reference for the ACMP and as an [Analog]
    /// channel that can be used to (roughly) check the VDD voltage
    pub fn bandgap(&mut self) -> Result<Analog<Bandgap<Input>>, Error> {
        self.bandgap.take().ok_or(Error::Moved)
    }
    /// Return the instance of [Bandgap]
    pub fn return_bandgap(&mut self, inst: Analog<Bandgap<Input>>) {
        self.bandgap.replace(inst);
    }
    /// Try to grab an instance of the onchip Voltage Reference High ([VrefH]) channel.
    pub fn vref_h(&mut self) -> Result<Analog<VrefH<Input>>, Error> {
        self.vref_h.take().ok_or(Error::Moved)
    }
    /// Return the instance of [VrefH]
    pub fn return_vref_h(&mut self, inst: Analog<VrefH<Input>>) {
        self.vref_h.replace(inst);
    }
    /// Try to grab an instance of the onchip Voltage Reference Low ([VrefL]) channel.
    pub fn vref_l(&mut self) -> Result<Analog<VrefL<Input>>, Error> {
        self.vref_l.take().ok_or(Error::Moved)
    }
    /// Return the instance of [VrefL]
    pub fn return_vref_l(&mut self, inst: Analog<VrefL<Input>>) {
        self.vref_l.replace(inst);
    }
    /// Grab a [DummyDisable] instance. Multiple Instances possible.
    pub fn dummy_disable(&self) -> Analog<DummyDisable<Input>> {
        // DummyDisable is a zero-sized marker, so it can be minted freely.
        Analog {
            pin: DummyDisable::<Input> { _mode: PhantomData },
        }
    }
}
/// Holds On-Chip ADC Channel inputs and provides an interface to grab and return them.
// These have to have the Input dummy type to allow them to have the Channel
// trait.
pub struct OnChipChannels {
    // Each channel is an Option so the accessor methods can move it out
    // (`take`) and later put it back (`replace`).
    vss: Option<Analog<Vss<Input>>>,
    temp_sense: Option<Analog<TempSense<Input>>>,
    bandgap: Option<Analog<Bandgap<Input>>>,
    vref_h: Option<Analog<VrefH<Input>>>,
    vref_l: Option<Analog<VrefL<Input>>>,
}
/// Dummy type state for on-chip ADC input channels
pub struct Input;
/// Adc Input Channel, measures ground (should be 0?)
pub struct Vss<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, measures internal temperature sensor
pub struct TempSense<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Bandgap internal voltage reference
pub struct Bandgap<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, High
pub struct VrefH<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, Low
pub struct VrefL<Input> {
    _mode: PhantomData<Input>,
}
/// Dummy Channel that temporarily disables the Adc Module.
pub struct DummyDisable<Input> {
    _mode: PhantomData<Input>,
}
/// Implements the embedded-hal [Channel] trait for each `chan => Pin`
/// pair, mapping the pin's [Analog] type state to its ADC channel number.
macro_rules! adc_input_channels {
    ( $($Chan:expr => $Pin:ident),+ $(,)*) => {
        $(
            impl<OldMode> Channel<Adc<Enabled>> for Analog<$Pin<OldMode>> {
                type ID = u8;
                // Hardware channel number (ADCH field value) for this pin.
                fn channel() -> u8 { $Chan }
            }
        )+
    };
}
use crate::gpio::{gpioa::*, gpiob::*};
adc_input_channels! (
0_u8 => PTA0,
1_u8 => PTA1,
2_u8 => PTA6,
3_u8 => PTA7,
4_u8 => PTB0,
5_u8 => PTB1,
6_u8 => PTB2 | // Don't start a conversion (set channel to DummyDisable)
self.peripheral.sc1.modify(|_, w| w.adch()._11111()); | random_line_split |
//! Analog to Digital Converter (ADC) driver for the KEA family of MCU.
//!
//! ## Clocking
//!
//! The ADC requires a clock signal (ADCK), which is generated from the bus
//! clock, the bus clock divided by 2, the output of the OSC peripheral
//! (OSC_OUT), or an internal asynchronous clock, which, when selected,
//! operates in wait and stop modes. With any of these clock sources a
//! multi-value divider is provided to further divide the incoming clock by 1
//! (i.e. 1:1), 2, 4, or 8.
//!
//! The clock frequency must fall within 400kHz to 8MHz (4MHz in low power
//! mode), This is the same for all KEA MCUs. Ideally, the HAL will only
//! present valid options, but that is not yet implemented (pending clocks
//! improvements to output frequencies). For now you are trusted to input the
//! correct frequency.
//!
//! *Note:* When using the FIFO mode with FIFO scan mode disabled, the bus
//! clock must be faster than half the ADC clock (ADCK). Bus clock >= ADCK / 2.
//!
//! ## Pin Control
//!
//! This functionality is implemented in the GPIO module. See [Analog]
//! for details.
//!
//! ## Conversion Width
//!
//! The ADC can be run in 8, 10, or 12 bit modes. These modes are enumerated in
//! [AdcResolution].
//!
//! ## Hardware Trigger
//!
//! The ADC conversions can be started by a hardware trigger. This is not
//! implemented in all KEA chips, so implementation here will be Delayed. Use
//! the PAC. Enable is ADC_SC2\[ADTRG\] = 1, and trigger is the ADHWT source.
//!
//! ## Usage
//!
//! ### AdcConfig struct
//!
//! [AdcConfig] offers public fields to allow for creation in-place. The
//! [AdcConfig::calculate_divisor] method allows the user to specify the
//! desired Adc Clock frequency (given the clock source frequency). The clock
//! divider which gets the closest to that frequency is chosen.
//!
//! The AdcConfig structure also implements the [Default] trait.
//!
//! ```rust
//! let config: AdcConfig = Default::default();
//!
//! config.calculate_divisor(20_u32.MHz(), 2_u32.MHz());
//! assert!(matches!(config.clock_divisor, ClockDivisor::_8));
//! ```
use crate::hal::adc::{Channel, OneShot};
use crate::{pac::ADC, HALExt};
use core::{convert::Infallible, marker::PhantomData};
use embedded_time::rate::*;
/// Error Enumeration for this module
#[derive(Debug)]
pub enum Error {
    /// The Channel has already been moved
    /// (requested from [OnChipChannels] without having been returned)
    Moved,
}
/// Analog type state for a GPIO pin.
///
/// This mode "gives" the pin to the ADC hardware peripheral.
/// The ADC Peripheral can take the GPIO pins in any state. The Peripheral will
/// reconfigure the pin to turn off any output drivers, disable input buffers
/// (reading the pin after configuring as analog will return a zero), and
/// disable the pullup. Electrically, an Analog pin that is not currently under
/// conversion is effectively HighImpedence.
///
/// Once a pin is released from the ADC, it will return to its previous state.
/// The previous state includes output enabled, input enabled, pullup enabled,
/// and level (for outputs). Note to accomplish this the pin implements the
/// outof_analog method, which is semantically different from the other type
/// states.
///
/// For example, [crate::gpio::gpioa::PTA0] is configured to be a Output that is set high is
/// converted into the analog mode with the [crate::gpio::gpioa::PTA0::into_analog] method.
/// Once measurements from that pin are completed it will be returned to an
/// Output that is set high by calling the [Analog::outof_analog] method.
///
/// ```rust
/// let pta0 = gpioa.pta0.into_push_pull_output();
/// pta0.set_high();
/// let mut pta0 = pta0.into_analog(); // pta0 is hi-Z
/// let value = adc.read(&mut pta0).unwrap_or(0);
/// let pta0 = pta0.outof_analog(); // pta0 is push-pull output, set high.
/// ```
///
/// Note: This is a hardware feature that requires effectively no clock cycles
/// to complete. "Manually" reconfiguring the pins to HighImpedence before
/// calling into_analog() is discouraged, but it would not hurt anything.
pub struct Analog<Pin> {
    // The wrapped GPIO pin; restored to its prior configuration by
    // `outof_analog` (see the type-level documentation above).
    pin: Pin,
}
/// Interface for ADC Peripheral.
///
/// Returned by calling [HALExt::split] on the pac [ADC] structure. Holds state
/// of peripheral.
pub struct Adc<State> {
    // Owned PAC-level ADC register block.
    peripheral: ADC,
    // Zero-sized marker tracking the Enabled/Disabled type state.
    _state: PhantomData<State>,
    /// Contains the On-Chip ADC Channels, like the MCU's temperature sensor.
    pub onchip_channels: OnChipChannels,
}
impl HALExt for ADC {
type T = Adc<Disabled>;
fn split(self) -> Adc<Disabled> {
Adc {
peripheral: self,
_state: PhantomData,
onchip_channels: OnChipChannels {
vss: Some(Analog {
pin: Vss::<Input> { _mode: PhantomData },
}),
temp_sense: Some(Analog {
pin: TempSense::<Input> { _mode: PhantomData },
}),
bandgap: Some(Analog {
pin: Bandgap::<Input> { _mode: PhantomData },
}),
vref_h: Some(Analog {
pin: VrefH::<Input> { _mode: PhantomData },
}),
vref_l: Some(Analog {
pin: VrefL::<Input> { _mode: PhantomData },
}),
},
}
}
}
/// Configuration struct for Adc peripheral.
pub struct AdcConfig {
    /// Determines the clock source for the ADC peripheral
    ///
    /// Default is [AdcClocks::Bus]
    pub clock_source: AdcClocks,
    /// Divides the clock source to get the ADC clock into it's usable range of
    /// 400kHz - 8MHz (4MHz in low power mode).
    ///
    /// Default is [ClockDivisor::_1] (no divison)
    pub clock_divisor: ClockDivisor,
    /// Set the resolution of ADC conversion
    ///
    /// Default is [AdcResolution::_12bit] (matching the [Default] impl)
    pub resolution: AdcResolution,
    /// Set ADC sample time.
    ///
    /// Default is [AdcSampleTime::Short]
    pub sample_time: AdcSampleTime,
    /// Set low power mode
    ///
    /// Default is false.
    pub low_power: bool,
}
impl AdcConfig {
    /// Calculate the ADC clock divisor
    ///
    /// Uses the current clock source and clock frequency to determine
    /// the best divisor to use in order to have minimal error between
    /// the ADC clock rate and the desired ADC clock rate.
    ///
    /// Note: This relies on trustworthy values for source_freq and valid
    /// values for req_adc_freq. In the future this should know or
    /// determine what the current clock frequency is instead of relying
    /// on the user to provide it.
    ///
    /// # Panics
    ///
    /// Panics if the resulting ADC clock falls outside the hardware's
    /// legal range (400kHz to 8MHz, 4MHz in low power mode).
    pub fn calculate_divisor(&mut self, source_freq: Hertz, req_adc_freq: Hertz) {
        // Ideal integer ratio between the source clock and the requested
        // ADC clock. Kept in i64 so the error terms below are ordinary
        // signed arithmetic: the previous u8 math underflowed (debug
        // panic, release wrap-around) whenever the ratio was 0 or smaller
        // than a candidate divisor, and the `as u8` cast silently wrapped
        // ratios above 255.
        let denom = (source_freq.integer() / req_adc_freq.integer()) as i64;
        let mut output: i64 = 1;
        let mut err_old = (denom - output).abs();
        // The /16 divisor is only implemented for the Bus clock source.
        let max_divisor: i64 = match self.clock_source {
            AdcClocks::Bus => 16,
            _ => 8,
        };
        // Keep doubling the divisor while that gets us at least as close
        // to the ideal ratio; stop at the first step that makes it worse.
        while output < max_divisor {
            let err = (denom - (output << 1)).abs();
            if err <= err_old {
                output <<= 1;
                err_old = err;
            } else {
                break;
            }
        }
        // I am of the mind that this assert is okay, at least until the input
        // clock can be known at compile time.
        let ad_clock = source_freq.integer() / output as u32;
        assert!(400_000 <= ad_clock);
        assert!(
            ad_clock
                <= match self.low_power {
                    false => 8_000_000,
                    true => 4_000_000,
                }
        );
        self.clock_divisor = match output {
            1 => ClockDivisor::_1,
            2 => ClockDivisor::_2,
            4 => ClockDivisor::_4,
            8 => ClockDivisor::_8,
            _ => ClockDivisor::_16,
        }
    }
    /// Set the divisor directly. panics if divisor isn't supported by the
    /// clock source.
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_divisor(&mut self, divisor: ClockDivisor) {
        // divisor can't be 16 unless using the Bus clock
        assert!(
            !(!matches!(self.clock_source, AdcClocks::Bus) && matches!(divisor, ClockDivisor::_16))
        );
        self.clock_divisor = divisor;
    }
    /// Sets the clock source, panics if divisor isn't supported
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_clock_source(&mut self, clock: AdcClocks) {
        // The /16 divisor is only legal with the Bus clock, so reject any
        // other source while it is selected. (The previous assert lacked
        // the outer negation and therefore panicked on every *valid*
        // combination; this now mirrors `set_divisor`.)
        assert!(
            !(!matches!(clock, AdcClocks::Bus) && matches!(self.clock_divisor, ClockDivisor::_16))
        );
        self.clock_source = clock;
    }
}
impl Default for AdcConfig {
    /// Conservative defaults: undivided Bus clock, 12-bit resolution,
    /// short sample time, low-power mode off.
    fn default() -> AdcConfig {
        AdcConfig {
            clock_source: AdcClocks::Bus,
            clock_divisor: ClockDivisor::_1,
            resolution: AdcResolution::_12bit,
            sample_time: AdcSampleTime::Short,
            low_power: false,
        }
    }
}
/// Clock types available to the Adc peripheral
///
/// Dividers will be chosen appropriately to suit requested clock rate.
pub enum AdcClocks {
    /// Use the incoming Bus Clock
    Bus,
    /// Use the external clock source (OSC_OUT output of the OSC
    /// peripheral — see the module-level clocking notes)
    External,
    /// Internal asynchronous clock; available in Wait AND Stop Mode
    Async,
}
/// This enum represents the available ADC resolutions
///
/// Regardless of resolution chosen, results are always right justified
#[repr(u8)]
pub enum AdcResolution {
    /// 8 bit AD conversion mode
    _8bit = 0,
    /// 10 bit AD conversion mode
    _10bit = 1,
    /// 12 bit AD conversion mode
    _12bit = 2,
}
/// Adc sample time
pub enum AdcSampleTime {
    /// Sample for 3.5 ADC clock (ADCK) cycles.
    Short = 0,
    /// Sample for 23.5 ADC clock (ADCK) cycles.
    ///
    /// Required for high impedance (>2k @ ADCK > 4MHz, >5k @ ADCK < 4MHz)
    /// inputs.
    Long = 1,
}
/// Adc Clock Divisors
///
/// Note 1/16 divisor is only usable for the Bus clock
pub enum ClockDivisor {
    /// Source / 1, No division
    _1 = 0,
    /// Source / 2
    _2 = 1,
    /// Source / 4
    _4 = 2,
    /// Source / 8
    _8 = 3,
    /// Source / 16
    _16 = 4,
}
/// Enabled type state: ADC clock gated on, registers accessible
pub struct Enabled;
/// Disabled type state: ADC register access would HardFault
pub struct Disabled;
impl Adc<Enabled> {
    /// Poll to determine if ADC conversion is complete.
    ///
    /// Note: This flag is cleared when the sampling mode is changed,
    /// interrupts are enabled, [Adc::set_channel] is called, and when [Adc::result] is
    /// called (including [Adc::try_result])
    pub fn is_done(&self) -> bool {
        // SC1[COCO]: conversion-complete flag.
        self.peripheral.sc1.read().coco().bit()
    }
    /// Poll to determine if ADC conversion is underway
    pub fn is_converting(&self) -> bool {
        // SC2[ADACT]: conversion-active flag.
        self.peripheral.sc2.read().adact().bit()
    }
    /// Grab the last ADC conversion result.
    ///
    /// Reading the result register also clears the conversion-complete
    /// flag (see [Adc::is_done]).
    pub fn result(&self) -> u16 {
        self.peripheral.r.read().adr().bits()
    }
    /// Poll for conversion completion, if done return the result.
    pub fn try_result(&self) -> Option<u16> {
        if self.is_done() {
            Some(self.result())
        } else {
            None
        }
    }
    /// Set ADC target channel.
    ///
    /// In Single conversion mode (OneShot), setting the channel begins the conversion. In FIFO mode
    /// the channel is added to the FIFO buffer.
    ///
    /// Note: If the channel is changed while a conversion is in progress the
    /// current conversion will be cancelled. If in FIFO mode, conversion will
    /// resume once the FIFO channels are refilled.
    pub fn set_channel<T: Channel<Adc<Enabled>, ID = u8>>(&self, _pin: &T) {
        // The channel number comes from the pin's `Channel` impl, which is
        // generated by the `adc_input_channels!` macro below.
        self.peripheral
            .sc1
            .modify(|_, w| unsafe { w.adch().bits(T::channel()) });
    }
    /// Set the ADC's configuration
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.peripheral.sc3.modify(|_, w| {
            use pac::adc::sc3::{ADICLK_A, ADIV_A, ADLSMP_A, MODE_A};
            // Input clock select (ADICLK).
            w.adiclk()
                .variant(match config.clock_source {
                    AdcClocks::Bus =>
                    // If divisor is 16, use the Bus / 2 clock source, else use
                    // the 1:1 Bus clock source
                    {
                        match config.clock_divisor {
                            ClockDivisor::_16 => ADICLK_A::_01,
                            _ => ADICLK_A::_00,
                        }
                    }
                    AdcClocks::External => ADICLK_A::_10,
                    AdcClocks::Async => ADICLK_A::_11,
                })
                // Conversion resolution (MODE).
                .mode()
                .variant(match config.resolution {
                    AdcResolution::_8bit => MODE_A::_00,
                    AdcResolution::_10bit => MODE_A::_01,
                    AdcResolution::_12bit => MODE_A::_10,
                })
                // Sample window length (ADLSMP).
                .adlsmp()
                .variant(match config.sample_time {
                    AdcSampleTime::Short => ADLSMP_A::_0,
                    AdcSampleTime::Long => ADLSMP_A::_1,
                })
                // Clock divide (ADIV). /16 falls into the `_` arm (ADIV /8)
                // and is combined with the Bus/2 source selected above.
                .adiv()
                .variant(match config.clock_divisor {
                    ClockDivisor::_1 => ADIV_A::_00,
                    ClockDivisor::_2 => ADIV_A::_01,
                    ClockDivisor::_4 => ADIV_A::_10,
                    _ => ADIV_A::_11,
                })
                // Low-power conversion mode (ADLPC).
                .adlpc()
                .bit(config.low_power)
        });
        // It looks like SCGC has to be set before touching the peripheral
        // at all, else hardfault. Go back later to confirm that if using external clock
        // scgc can be cleared.
        // w.adc().variant(match config.clock_source {
        //     AdcClocks::Bus => ADC_A::_1,
        //     _ => ADC_A::_0,
        // })
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
}
impl Adc<Disabled> {
    /// Connects the bus clock to the adc via the SIM peripheral, allowing
    /// read and write access to ADC registers.
    ///
    /// Any attempt to access ADC registers while disabled results in a
    /// HardFault, generated by hardware.
    ///
    /// This also enables the bandgap voltage reference.
    pub fn enable(self) -> Adc<Enabled> {
        // Critical section: SIM and PMC are chip-wide shared registers.
        cortex_m::interrupt::free(|_| {
            unsafe { &(*pac::SIM::ptr()) }.scgc.modify(|_, w| {
                use pac::sim::scgc::ADC_A;
                w.adc().variant(ADC_A::_1)
            });
            // Don't start a conversion (set channel to DummyDisable)
            self.peripheral.sc1.modify(|_, w| w.adch()._11111());
            // Bandgap. Grab directly, Currently the bandgap isn't implemented
            // in [system::PMC]. We will eventually have to pass in the pmc
            // peripheral handle as a variable.
            unsafe { &(*pac::PMC::ptr()) }
                .spmsc1
                .modify(|_, w| w.bgbe()._1());
        });
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
    /// Set the ADC's configuration
    ///
    /// This is a sugar method for calling [Adc<Disabled>::enable] followed by
    /// [Adc<Enabled>::configure]
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.enable().configure(config)
    }
}
impl<Mode> Adc<Mode> {
    /// Not Implemented
    ///
    /// Will eventually switch the driver to interrupt-driven operation.
    pub fn into_interrupt(self) -> Adc<Mode> {
        unimplemented!("Interrupt is not yet implemented");
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
    }
    /// Not Implemented
    ///
    /// Will eventually enable the hardware FIFO with the requested depth.
    pub fn into_fifo(self, _depth: u8) -> Adc<Mode> {
        // self.peripheral
        //     .sc4
        //     .modify(|_r, w| w.afdep().bits(depth & 0x7));
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
        unimplemented!("FIFO is not yet implemented");
    }
    /// Not Implemented
    pub fn into_continuous(self) -> Adc<Mode> {
        unimplemented!("Continuous Conversion mode not yet implemented");
    }
}
impl OnChipChannels {
/// Request an instance of an on-chip [Vss] channel.
pub fn vss(&mut self) -> Result<Analog<Vss<Input>>, Error> {
self.vss.take().ok_or(Error::Moved)
}
/// Return the instance of [Vss]
pub fn return_vss(&mut self, inst: Analog<Vss<Input>>) {
self.vss.replace(inst);
}
/// Try to grab an instance of the onchip [TempSense] channel.
pub fn tempsense(&mut self) -> Result<Analog<TempSense<Input>>, Error> {
self.temp_sense.take().ok_or(Error::Moved)
}
/// Return the instance of [TempSense]
pub fn return_tempsense(&mut self, inst: Analog<TempSense<Input>>) {
self.temp_sense.replace(inst);
}
/// Try to grab an instance of the onchip [Bandgap] channel.
///
/// The bandgap reference is a fixed 1.16V (nom, Factory trimmed to +/-
/// 0.02V at Vdd=5.0 at 125C) signal that is available to the ADC Module.
/// It can be used as a voltage reference for the ACMP and as an [Analog]
/// channel that can be used to (roughly) check the VDD voltage
pub fn bandgap(&mut self) -> Result<Analog<Bandgap<Input>>, Error> {
self.bandgap.take().ok_or(Error::Moved)
}
/// Return the instance of [Bandgap]
pub fn return_bandgap(&mut self, inst: Analog<Bandgap<Input>>) |
/// Try to grab an instance of the onchip Voltage Reference High ([VrefH]) channel.
pub fn vref_h(&mut self) -> Result<Analog<VrefH<Input>>, Error> {
self.vref_h.take().ok_or(Error::Moved)
}
/// Return the instance of [VrefH]
pub fn return_vref_h(&mut self, inst: Analog<VrefH<Input>>) {
self.vref_h.replace(inst);
}
/// Try to grab an instance of the onchip Voltage Reference Low ([VrefL]) channel.
pub fn vref_l(&mut self) -> Result<Analog<VrefL<Input>>, Error> {
self.vref_l.take().ok_or(Error::Moved)
}
/// Return the instance of [VrefL]
pub fn return_vref_l(&mut self, inst: Analog<VrefL<Input>>) {
self.vref_l.replace(inst);
}
/// Grab a [DummyDisable] instance. Multiple Instances possible.
pub fn dummy_disable(&self) -> Analog<DummyDisable<Input>> {
Analog {
pin: DummyDisable::<Input> { _mode: PhantomData },
}
}
}
/// Holds On-Chip ADC Channel inputs and provides an interface to grab and return them.
// These have to have the Input dummy type to allow them to have the Channel
// trait.
pub struct OnChipChannels {
    // Each channel is an Option so the accessor methods can move it out
    // (`take`) and later put it back (`replace`).
    vss: Option<Analog<Vss<Input>>>,
    temp_sense: Option<Analog<TempSense<Input>>>,
    bandgap: Option<Analog<Bandgap<Input>>>,
    vref_h: Option<Analog<VrefH<Input>>>,
    vref_l: Option<Analog<VrefL<Input>>>,
}
/// Dummy type state for on-chip ADC input channels
pub struct Input;
/// Adc Input Channel, measures ground (should be 0?)
pub struct Vss<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, measures internal temperature sensor
pub struct TempSense<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Bandgap internal voltage reference
pub struct Bandgap<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, High
pub struct VrefH<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, Low
pub struct VrefL<Input> {
    _mode: PhantomData<Input>,
}
/// Dummy Channel that temporarily disables the Adc Module.
pub struct DummyDisable<Input> {
    _mode: PhantomData<Input>,
}
/// Implements the embedded-hal [Channel] trait for each `chan => Pin`
/// pair, mapping the pin's [Analog] type state to its ADC channel number.
macro_rules! adc_input_channels {
    ( $($Chan:expr => $Pin:ident),+ $(,)*) => {
        $(
            impl<OldMode> Channel<Adc<Enabled>> for Analog<$Pin<OldMode>> {
                type ID = u8;
                // Hardware channel number (ADCH field value) for this pin.
                fn channel() -> u8 { $Chan }
            }
        )+
    };
}
use crate::gpio::{gpioa::*, gpiob::*};
adc_input_channels! (
0_u8 => PTA0,
1_u8 => PTA1,
2_u8 => PTA6,
3_u8 => PTA7,
4_u8 => PTB0,
5_u8 => PTB1,
6_u8 => P | {
self.bandgap.replace(inst);
} | identifier_body |
//! Analog to Digital Converter (ADC) driver for the KEA family of MCU.
//!
//! ## Clocking
//!
//! The ADC requires a clock signal (ADCK), which is generated from the bus
//! clock, the bus clock divided by 2, the output of the OSC peripheral
//! (OSC_OUT), or an internal asynchronous clock, which, when selected,
//! operates in wait and stop modes. With any of these clock sources a
//! multi-value divider is provided to further divide the incoming clock by 1
//! (i.e. 1:1), 2, 4, or 8.
//!
//! The clock frequency must fall within 400kHz to 8MHz (4MHz in low power
//! mode), This is the same for all KEA MCUs. Ideally, the HAL will only
//! present valid options, but that is not yet implemented (pending clocks
//! improvements to output frequencies). For now you are trusted to input the
//! correct frequency.
//!
//! *Note:* When using the FIFO mode with FIFO scan mode disabled, the bus
//! clock must be faster than half the ADC clock (ADCK). Bus clock >= ADCK / 2.
//!
//! ## Pin Control
//!
//! This functionality is implemented in the GPIO module. See [Analog]
//! for details.
//!
//! ## Conversion Width
//!
//! The ADC can be run in 8, 10, or 12 bit modes. These modes are enumerated in
//! [AdcResolution].
//!
//! ## Hardware Trigger
//!
//! The ADC conversions can be started by a hardware trigger. This is not
//! implemented in all KEA chips, so implementation here will be Delayed. Use
//! the PAC. Enable is ADC_SC2\[ADTRG\] = 1, and trigger is the ADHWT source.
//!
//! ## Usage
//!
//! ### AdcConfig struct
//!
//! [AdcConfig] offers public fields to allow for creation in-place. The
//! [AdcConfig::calculate_divisor] method allows the user to specify the
//! desired Adc Clock frequency (given the clock source frequency). The clock
//! divider which gets the closest to that frequency is chosen.
//!
//! The AdcConfig structure also implements the [Default] trait.
//!
//! ```rust
//! let config: AdcConfig = Default::default();
//!
//! config.calculate_divisor(20_u32.MHz(), 2_u32.MHz());
//! assert!(matches!(config.clock_divisor, ClockDivisor::_8));
//! ```
use crate::hal::adc::{Channel, OneShot};
use crate::{pac::ADC, HALExt};
use core::{convert::Infallible, marker::PhantomData};
use embedded_time::rate::*;
/// Error Enumeration for this module
#[derive(Debug)]
pub enum Error {
    /// The Channel has already been moved
    /// (requested from [OnChipChannels] without having been returned)
    Moved,
}
/// Analog type state for a GPIO pin.
///
/// This mode "gives" the pin to the ADC hardware peripheral.
/// The ADC Peripheral can take the GPIO pins in any state. The Peripheral will
/// reconfigure the pin to turn off any output drivers, disable input buffers
/// (reading the pin after configuring as analog will return a zero), and
/// disable the pullup. Electrically, an Analog pin that is not currently under
/// conversion is effectively HighImpedence.
///
/// Once a pin is released from the ADC, it will return to its previous state.
/// The previous state includes output enabled, input enabled, pullup enabled,
/// and level (for outputs). Note to accomplish this the pin implements the
/// outof_analog method, which is semantically different from the other type
/// states.
///
/// For example, [crate::gpio::gpioa::PTA0] is configured to be a Output that is set high is
/// converted into the analog mode with the [crate::gpio::gpioa::PTA0::into_analog] method.
/// Once measurements from that pin are completed it will be returned to an
/// Output that is set high by calling the [Analog::outof_analog] method.
///
/// ```rust
/// let pta0 = gpioa.pta0.into_push_pull_output();
/// pta0.set_high();
/// let mut pta0 = pta0.into_analog(); // pta0 is hi-Z
/// let value = adc.read(&mut pta0).unwrap_or(0);
/// let pta0 = pta0.outof_analog(); // pta0 is push-pull output, set high.
/// ```
///
/// Note: This is a hardware feature that requires effectively no clock cycles
/// to complete. "Manually" reconfiguring the pins to HighImpedence before
/// calling into_analog() is discouraged, but it would not hurt anything.
pub struct Analog<Pin> {
    // The wrapped GPIO pin; restored to its prior configuration by
    // `outof_analog` (see the type-level documentation above).
    pin: Pin,
}
/// Interface for ADC Peripheral.
///
/// Returned by calling [HALExt::split] on the pac [ADC] structure. Holds state
/// of peripheral.
pub struct Adc<State> {
    // Owned PAC-level ADC register block.
    peripheral: ADC,
    // Zero-sized marker tracking the Enabled/Disabled type state.
    _state: PhantomData<State>,
    /// Contains the On-Chip ADC Channels, like the MCU's temperature sensor.
    pub onchip_channels: OnChipChannels,
}
impl HALExt for ADC {
    type T = Adc<Disabled>;
    /// Consume the raw PAC ADC and wrap it in the HAL [Adc] interface,
    /// starting in the [Disabled] state with every on-chip channel
    /// available exactly once.
    fn split(self) -> Adc<Disabled> {
        Adc {
            peripheral: self,
            _state: PhantomData,
            onchip_channels: OnChipChannels {
                vss: Some(Analog {
                    pin: Vss::<Input> { _mode: PhantomData },
                }),
                temp_sense: Some(Analog {
                    pin: TempSense::<Input> { _mode: PhantomData },
                }),
                bandgap: Some(Analog {
                    pin: Bandgap::<Input> { _mode: PhantomData },
                }),
                vref_h: Some(Analog {
                    pin: VrefH::<Input> { _mode: PhantomData },
                }),
                vref_l: Some(Analog {
                    pin: VrefL::<Input> { _mode: PhantomData },
                }),
            },
        }
    }
}
/// Configuration struct for Adc peripheral.
pub struct AdcConfig {
    /// Determines the clock source for the ADC peripheral
    ///
    /// Default is [AdcClocks::Bus]
    pub clock_source: AdcClocks,
    /// Divides the clock source to get the ADC clock into it's usable range of
    /// 400kHz - 8MHz (4MHz in low power mode).
    ///
    /// Default is [ClockDivisor::_1] (no divison)
    pub clock_divisor: ClockDivisor,
    /// Set the resolution of ADC conversion
    ///
    /// Default is [AdcResolution::_12bit] (matching the [Default] impl)
    pub resolution: AdcResolution,
    /// Set ADC sample time.
    ///
    /// Default is [AdcSampleTime::Short]
    pub sample_time: AdcSampleTime,
    /// Set low power mode
    ///
    /// Default is false.
    pub low_power: bool,
}
impl AdcConfig {
    /// Calculate the ADC clock divisor
    ///
    /// Uses the current clock source and clock frequency to determine
    /// the best divisor to use in order to have minimal error between
    /// the ADC clock rate and the desired ADC clock rate.
    ///
    /// Note: This relies on trustworthy values for source_freq and valid
    /// values for req_adc_freq. In the future this should know or
    /// determine what the current clock frequency is instead of relying
    /// on the user to provide it.
    ///
    /// # Panics
    ///
    /// Panics if the resulting ADC clock falls outside the hardware's
    /// legal range (400kHz to 8MHz, 4MHz in low power mode).
    pub fn calculate_divisor(&mut self, source_freq: Hertz, req_adc_freq: Hertz) {
        // Ideal integer ratio between the source clock and the requested
        // ADC clock. Kept in i64 so the error terms below are ordinary
        // signed arithmetic: the previous u8 math underflowed (debug
        // panic, release wrap-around) whenever the ratio was 0 or smaller
        // than a candidate divisor, and the `as u8` cast silently wrapped
        // ratios above 255.
        let denom = (source_freq.integer() / req_adc_freq.integer()) as i64;
        let mut output: i64 = 1;
        let mut err_old = (denom - output).abs();
        // The /16 divisor is only implemented for the Bus clock source.
        let max_divisor: i64 = match self.clock_source {
            AdcClocks::Bus => 16,
            _ => 8,
        };
        // Keep doubling the divisor while that gets us at least as close
        // to the ideal ratio; stop at the first step that makes it worse.
        while output < max_divisor {
            let err = (denom - (output << 1)).abs();
            if err <= err_old {
                output <<= 1;
                err_old = err;
            } else {
                break;
            }
        }
        // I am of the mind that this assert is okay, at least until the input
        // clock can be known at compile time.
        let ad_clock = source_freq.integer() / output as u32;
        assert!(400_000 <= ad_clock);
        assert!(
            ad_clock
                <= match self.low_power {
                    false => 8_000_000,
                    true => 4_000_000,
                }
        );
        self.clock_divisor = match output {
            1 => ClockDivisor::_1,
            2 => ClockDivisor::_2,
            4 => ClockDivisor::_4,
            8 => ClockDivisor::_8,
            _ => ClockDivisor::_16,
        }
    }
    /// Set the divisor directly. panics if divisor isn't supported by the
    /// clock source.
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_divisor(&mut self, divisor: ClockDivisor) {
        // divisor can't be 16 unless using the Bus clock
        assert!(
            !(!matches!(self.clock_source, AdcClocks::Bus) && matches!(divisor, ClockDivisor::_16))
        );
        self.clock_divisor = divisor;
    }
    /// Sets the clock source, panics if divisor isn't supported
    ///
    /// TODO: Refactor to remove assert. Add Clock Source as a type state
    pub fn set_clock_source(&mut self, clock: AdcClocks) {
        // The /16 divisor is only legal with the Bus clock, so reject any
        // other source while it is selected. (The previous assert lacked
        // the outer negation and therefore panicked on every *valid*
        // combination; this now mirrors `set_divisor`.)
        assert!(
            !(!matches!(clock, AdcClocks::Bus) && matches!(self.clock_divisor, ClockDivisor::_16))
        );
        self.clock_source = clock;
    }
}
impl Default for AdcConfig {
    /// Reset-style configuration: Bus clock undivided, 12-bit conversions,
    /// short sample time, low power mode off.
    fn default() -> AdcConfig {
        AdcConfig {
            low_power: false,
            sample_time: AdcSampleTime::Short,
            resolution: AdcResolution::_12bit,
            clock_divisor: ClockDivisor::_1,
            clock_source: AdcClocks::Bus,
        }
    }
}
/// Clock types available to the Adc peripheral
///
/// Dividers will be chosen appropriately to suit requested clock rate.
pub enum AdcClocks {
    /// Use the incoming Bus Clock
    Bus,
    /// Use an external clock source.
    ///
    /// NOTE(review): the original doc here was placeholder text ("jkl");
    /// confirm which external clock input this selects in the reference
    /// manual before relying on this description.
    External,
    /// Available in Wait AND Stop Mode
    Async,
}
/// This enum represents the available ADC resolutions
///
/// Regardless of resolution chosen, results are always right justified
#[repr(u8)]
pub enum AdcResolution {
    /// 8 bit AD conversion mode
    _8bit = 0,
    /// 10 bit AD conversion mode
    _10bit = 1,
    /// 12 bit AD conversion mode
    _12bit = 2,
}
/// Adc sample time
pub enum AdcSampleTime {
    /// Sample for 3.5 ADC clock (ADCK) cycles.
    Short = 0,
    /// Sample for 23.5 ADC clock (ADCK) cycles.
    ///
    /// Required for high impedance (>2k @ADCK > 4MHz, >5k @ ADCK < 4MHz)
    /// inputs.
    Long = 1,
}
/// Adc Clock Divisors
///
/// Note 1/16 divisor is only usable for the Bus clock
pub enum ClockDivisor {
    /// Source / 1, No division
    _1 = 0,
    /// Source / 2
    _2 = 1,
    /// Source / 4
    _4 = 2,
    /// Source / 8
    _8 = 3,
    /// Source / 16
    _16 = 4,
}
/// Enabled state (type-state marker for `Adc<Enabled>`)
pub struct Enabled;
/// Disabled state (type-state marker for `Adc<Disabled>`)
pub struct Disabled;
impl Adc<Enabled> {
    /// Poll to determine if ADC conversion is complete.
    ///
    /// Note: This flag is cleared when the sampling mode is changed,
    /// interrupts are enabled, [Adc::set_channel] is called, and when [Adc::result] is
    /// called (including [Adc::try_result])
    pub fn is_done(&self) -> bool {
        self.peripheral.sc1.read().coco().bit()
    }
    /// Poll to determine if ADC conversion is underway
    pub fn is_converting(&self) -> bool {
        self.peripheral.sc2.read().adact().bit()
    }
    /// Grab the last ADC conversion result.
    pub fn result(&self) -> u16 {
        self.peripheral.r.read().adr().bits()
    }
    /// Poll for conversion completion, if done return the result.
    pub fn try_result(&self) -> Option<u16> {
        if self.is_done() {
            Some(self.result())
        } else {
            // Restored: this branch was an unfilled hole in the dump; the
            // `{ None }` body is the row's stored completion.
            None
        }
    }
    /// Set ADC target channel.
    ///
    /// In Single conversion mode (OneShot), setting the channel begins the conversion. In FIFO mode
    /// the channel is added to the FIFO buffer.
    ///
    /// Note: If the channel is changed while a conversion is in progress the
    /// current conversion will be cancelled. If in FIFO mode, conversion will
    /// resume once the FIFO channels are refilled.
    pub fn set_channel<T: Channel<Adc<Enabled>, ID = u8>>(&self, _pin: &T) {
        self.peripheral
            .sc1
            .modify(|_, w| unsafe { w.adch().bits(T::channel()) });
    }
    /// Set the ADC's configuration
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.peripheral.sc3.modify(|_, w| {
            use pac::adc::sc3::{ADICLK_A, ADIV_A, ADLSMP_A, MODE_A};
            w.adiclk()
                .variant(match config.clock_source {
                    AdcClocks::Bus =>
                    // If divisor is 16, use the Bus / 2 clock source, else use
                    // the 1:1 Bus clock source
                    {
                        match config.clock_divisor {
                            ClockDivisor::_16 => ADICLK_A::_01,
                            _ => ADICLK_A::_00,
                        }
                    }
                    AdcClocks::External => ADICLK_A::_10,
                    AdcClocks::Async => ADICLK_A::_11,
                })
                .mode()
                .variant(match config.resolution {
                    AdcResolution::_8bit => MODE_A::_00,
                    AdcResolution::_10bit => MODE_A::_01,
                    AdcResolution::_12bit => MODE_A::_10,
                })
                .adlsmp()
                .variant(match config.sample_time {
                    AdcSampleTime::Short => ADLSMP_A::_0,
                    AdcSampleTime::Long => ADLSMP_A::_1,
                })
                .adiv()
                // /16 shares the ADIV_A::_11 encoding with /8 because the /16
                // case also switches ADICLK to the Bus/2 source above.
                .variant(match config.clock_divisor {
                    ClockDivisor::_1 => ADIV_A::_00,
                    ClockDivisor::_2 => ADIV_A::_01,
                    ClockDivisor::_4 => ADIV_A::_10,
                    _ => ADIV_A::_11,
                })
                .adlpc()
                .bit(config.low_power)
        });
        // It looks like SCGC has to be set before touching the peripheral
        // at all, else hardfault. Go back later to confirm that if using external clock
        // scgc can be cleared.
        // w.adc().variant(match config.clock_source {
        //     AdcClocks::Bus => ADC_A::_1,
        //     _ => ADC_A::_0,
        // })
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
}
impl Adc<Disabled> {
    /// Connects the bus clock to the adc via the SIM peripheral, allowing
    /// read and write access to ADC registers.
    ///
    /// Any attempt to access ADC registers while disabled results in a
    /// HardFault, generated by hardware.
    ///
    /// This also enables the bandgap voltage reference.
    pub fn enable(self) -> Adc<Enabled> {
        // Critical section: SIM and PMC are shared between drivers.
        cortex_m::interrupt::free(|_| {
            // Gate the bus clock into the ADC block first; touching ADC
            // registers before this point hard-faults (see note above).
            unsafe { &(*pac::SIM::ptr()) }.scgc.modify(|_, w| {
                use pac::sim::scgc::ADC_A;
                w.adc().variant(ADC_A::_1)
            });
            // Don't start a conversion (set channel to DummyDisable)
            self.peripheral.sc1.modify(|_, w| w.adch()._11111());
            // Bandgap. Grab directly, Currently the bandgap isn't implemented
            // in [system::PMC]. We will eventually have to pass in the pmc
            // peripheral handle as a variable.
            unsafe { &(*pac::PMC::ptr()) }
                .spmsc1
                .modify(|_, w| w.bgbe()._1());
        });
        // Same peripheral and channels, re-wrapped in the Enabled type state.
        Adc {
            peripheral: self.peripheral,
            _state: PhantomData,
            onchip_channels: self.onchip_channels,
        }
    }
    /// Set the ADC's configuration
    ///
    /// This is a sugar method for calling [Adc<Disabled>::enable] followed by
    /// [Adc<Enabled>::configure]
    pub fn configure(self, config: AdcConfig) -> Adc<Enabled> {
        self.enable().configure(config)
    }
}
impl<Mode> Adc<Mode> {
    /// Not Implemented
    pub fn into_interrupt(self) -> Adc<Mode> {
        unimplemented!("Interrupt is not yet implemented");
        // Sketch of the eventual implementation, kept for reference:
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
    }
    /// Not Implemented
    pub fn into_fifo(self, _depth: u8) -> Adc<Mode> {
        // Sketch of the eventual implementation, kept for reference:
        // self.peripheral
        //     .sc4
        //     .modify(|_r, w| w.afdep().bits(depth & 0x7));
        // Adc::<Mode> {
        //     peripheral: self.peripheral,
        //     _state: PhantomData,
        //     onchip_channels: self.onchip_channels,
        // }
        unimplemented!("FIFO is not yet implemented");
    }
    /// Not Implemented
    pub fn into_continuous(self) -> Adc<Mode> {
        unimplemented!("Continuous Conversion mode not yet implemented");
    }
}
impl OnChipChannels {
/// Request an instance of an on-chip [Vss] channel.
pub fn vss(&mut self) -> Result<Analog<Vss<Input>>, Error> {
self.vss.take().ok_or(Error::Moved)
}
/// Return the instance of [Vss]
pub fn return_vss(&mut self, inst: Analog<Vss<Input>>) {
self.vss.replace(inst);
}
/// Try to grab an instance of the onchip [TempSense] channel.
pub fn tempsense(&mut self) -> Result<Analog<TempSense<Input>>, Error> {
self.temp_sense.take().ok_or(Error::Moved)
}
/// Return the instance of [TempSense]
pub fn return_tempsense(&mut self, inst: Analog<TempSense<Input>>) {
self.temp_sense.replace(inst);
}
/// Try to grab an instance of the onchip [Bandgap] channel.
///
/// The bandgap reference is a fixed 1.16V (nom, Factory trimmed to +/-
/// 0.02V at Vdd=5.0 at 125C) signal that is available to the ADC Module.
/// It can be used as a voltage reference for the ACMP and as an [Analog]
/// channel that can be used to (roughly) check the VDD voltage
pub fn bandgap(&mut self) -> Result<Analog<Bandgap<Input>>, Error> {
self.bandgap.take().ok_or(Error::Moved)
}
/// Return the instance of [Bandgap]
pub fn return_bandgap(&mut self, inst: Analog<Bandgap<Input>>) {
self.bandgap.replace(inst);
}
/// Try to grab an instance of the onchip Voltage Reference High ([VrefH]) channel.
pub fn vref_h(&mut self) -> Result<Analog<VrefH<Input>>, Error> {
self.vref_h.take().ok_or(Error::Moved)
}
/// Return the instance of [VrefH]
pub fn return_vref_h(&mut self, inst: Analog<VrefH<Input>>) {
self.vref_h.replace(inst);
}
/// Try to grab an instance of the onchip Voltage Reference Low ([VrefL]) channel.
pub fn vref_l(&mut self) -> Result<Analog<VrefL<Input>>, Error> {
self.vref_l.take().ok_or(Error::Moved)
}
/// Return the instance of [VrefL]
pub fn return_vref_l(&mut self, inst: Analog<VrefL<Input>>) {
self.vref_l.replace(inst);
}
/// Grab a [DummyDisable] instance. Multiple Instances possible.
pub fn dummy_disable(&self) -> Analog<DummyDisable<Input>> {
Analog {
pin: DummyDisable::<Input> { _mode: PhantomData },
}
}
}
/// Holds On-Chip ADC Channel inputs and provides an interface to grab and return them.
// These have to have the Input dummy type to allow them to have the Channel
// trait.
pub struct OnChipChannels {
    // Each channel is a take-once singleton: `Some` while available,
    // `None` while handed out to a caller.
    vss: Option<Analog<Vss<Input>>>,
    temp_sense: Option<Analog<TempSense<Input>>>,
    bandgap: Option<Analog<Bandgap<Input>>>,
    vref_h: Option<Analog<VrefH<Input>>>,
    vref_l: Option<Analog<VrefL<Input>>>,
}
/// Dummy type state for on-chip ADC input channels
pub struct Input;
// NOTE(review): in each struct below, `<Input>` declares a *generic
// parameter* named `Input` that shadows the marker type above — the structs
// are generic over any mode type, not tied to the `Input` struct.
/// Adc Input Channel, measures ground (should be 0?)
pub struct Vss<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, measures internal temperature sensor
pub struct TempSense<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Bandgap internal voltage reference
pub struct Bandgap<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, High
pub struct VrefH<Input> {
    _mode: PhantomData<Input>,
}
/// Adc Input Channel, Voltage Reference, Low
pub struct VrefL<Input> {
    _mode: PhantomData<Input>,
}
/// Dummy Channel that temporarily disables the Adc Module.
pub struct DummyDisable<Input> {
    _mode: PhantomData<Input>,
}
// Implements `Channel<Adc<Enabled>>` for each listed `Analog<Pin>` wrapper,
// mapping the pin to its hardware ADC channel number.
macro_rules! adc_input_channels {
    ( $($Chan:expr => $Pin:ident),+ $(,)*) => {
        $(
            // `OldMode` is unconstrained: whatever mode the pin was in before
            // being wrapped in `Analog` is accepted.
            impl<OldMode> Channel<Adc<Enabled>> for Analog<$Pin<OldMode>> {
                type ID = u8;
                fn channel() -> u8 { $Chan }
            }
        )+
    };
}
use crate::gpio::{gpioa::*, gpiob::*};
adc_input_channels! (
0_u8 => PTA0,
1_u8 => PTA1,
2_u8 => PTA6,
3_u8 => PTA7,
4_u8 => PTB0,
5_u8 => PTB1,
6_u8 => P | {
None
} | conditional_block |
installed.rs | // Copyright (c) 2016 Google Inc ([email protected]).
//
// Refer to the project root for licensing information.
//
extern crate serde_json;
extern crate url;
use std::borrow::BorrowMut;
use std::convert::AsRef;
use std::error::Error;
use std::io;
use std::io::Read;
use std::sync::Mutex;
use std::sync::mpsc::{channel, Receiver, Sender};
use hyper;
use hyper::{client, header, server, status, uri};
use serde_json::error;
use url::form_urlencoded;
use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
use types::{ApplicationSecret, Token};
use authenticator_delegate::AuthenticatorDelegate;
const OOB_REDIRECT_URI: &'static str = "urn:ietf:wg:oauth:2.0:oob";
/// Assembles a URL to request an authorization token (with user interaction).
/// Note that the redirect_uri here has to be either None or some variation of
/// http://localhost:{port}, or the authorization won't work (error "redirect_uri_mismatch")
fn build_authentication_request_url<'a, T, I>(auth_uri: &str,
                                              client_id: &str,
                                              scopes: I,
                                              redirect_uri: Option<String>)
                                              -> String
    where T: AsRef<str> + 'a,
          I: IntoIterator<Item = &'a T>
{
    // Space-separated scope list, as the authorization endpoint expects.
    let scopes_string = scopes.into_iter()
        .map(|sc| sc.as_ref())
        .collect::<Vec<_>>()
        .join(" ");
    let params = vec![format!("?scope={}", scopes_string),
                      format!("&redirect_uri={}",
                              redirect_uri.unwrap_or(OOB_REDIRECT_URI.to_string())),
                      "&response_type=code".to_string(),
                      format!("&client_id={}", client_id)];
    let mut url = String::from(auth_uri);
    for param in params {
        // Each parameter (delimiter included) is percent-encoded as a whole;
        // QUERY_ENCODE_SET leaves '?', '&' and '=' untouched, so only the
        // values (e.g. spaces in the scope list) are escaped.
        url.push_str(&percent_encode(param.as_ref(), QUERY_ENCODE_SET));
    }
    url
}
/// State for the OAuth2 "Installed Application" authorization flow.
pub struct InstalledFlow<C> {
    // HTTP client used for the token exchange with the provider.
    client: C,
    // Redirect-catching local HTTP server, present only in HTTPRedirect mode.
    server: Option<server::Listening>,
    // Port the local server listens on, if one was started.
    port: Option<u32>,
    // Receives the authorization code from the redirect handler thread.
    auth_code_rcv: Option<Receiver<String>>,
}
/// cf. https://developers.google.com/identity/protocols/OAuth2InstalledApp#choosingredirecturi
pub enum InstalledFlowReturnMethod {
    /// Involves showing a URL to the user and asking to copy a code from their browser
    /// (default)
    Interactive,
    /// Involves spinning up a local HTTP server and Google redirecting the browser to
    /// the server with a URL containing the code (preferred, but not as reliable). The
    /// parameter is the port to listen on.
    ///
    /// If the local server cannot be started, the flow falls back to
    /// `Interactive` (see `InstalledFlow::new`).
    HTTPRedirect(u32),
}
impl<C> InstalledFlow<C>
    where C: BorrowMut<hyper::Client>
{
    /// Starts a new Installed App auth flow.
    /// If HTTPRedirect is chosen as method and the server can't be started, the flow falls
    /// back to Interactive.
    pub fn new(client: C, method: Option<InstalledFlowReturnMethod>) -> InstalledFlow<C> {
        // Interactive-mode value; also the fallback whenever server setup fails.
        let default = InstalledFlow {
            client: client,
            server: None,
            port: None,
            auth_code_rcv: None,
        };
        match method {
            None => default,
            Some(InstalledFlowReturnMethod::Interactive) => default,
            // Start server on localhost to accept auth code.
            Some(InstalledFlowReturnMethod::HTTPRedirect(port)) => {
                let server = server::Server::http(format!("127.0.0.1:{}", port).as_str());
                match server {
                    Result::Err(_) => default,
                    Result::Ok(server) => {
                        // Channel over which the redirect handler hands the
                        // authorization code back to get_authorization_code().
                        let (tx, rx) = channel();
                        let listening =
                            server.handle(InstalledFlowHandler { auth_code_snd: Mutex::new(tx) });
                        match listening {
                            Result::Err(_) => default,
                            Result::Ok(listening) => {
                                InstalledFlow {
                                    client: default.client,
                                    server: Some(listening),
                                    port: Some(port),
                                    auth_code_rcv: Some(rx),
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    /// Handles the token request flow; it consists of the following steps:
    ///. Obtain a authorization code with user cooperation or internal redirect.
    ///. Obtain a token and refresh token using that code.
    ///. Return that token
    ///
    /// It's recommended not to use the DefaultAuthenticatorDelegate, but a specialized one.
    pub fn obtain_token<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
                                                             auth_delegate: &mut AD,
                                                             appsecret: &ApplicationSecret,
                                                             scopes: S)
                                                             -> Result<Token, Box<Error>>
        where T: AsRef<str> + 'a,
              S: Iterator<Item = &'a T>
    {
        let authcode = try!(self.get_authorization_code(auth_delegate, &appsecret, scopes));
        let tokens = try!(self.request_token(&appsecret, &authcode));
        // Successful response
        if tokens.access_token.is_some() {
            // NOTE(review): refresh_token.unwrap() panics if the provider
            // omits the refresh token from an otherwise-successful response
            // — confirm whether that can happen for installed-app flows.
            let mut token = Token {
                access_token: tokens.access_token.unwrap(),
                refresh_token: tokens.refresh_token.unwrap(),
                token_type: tokens.token_type.unwrap(),
                expires_in: tokens.expires_in,
                expires_in_timestamp: None,
            };
            // Convert the relative expires_in into an absolute timestamp.
            token.set_expiry_absolute();
            Result::Ok(token)
        } else {
            let err = io::Error::new(io::ErrorKind::Other,
                                     format!("Token API error: {} {}",
                                             tokens.error.unwrap_or("<unknown err>".to_string()),
                                             tokens.error_description
                                                 .unwrap_or("".to_string()))
                                         .as_str());
            Result::Err(Box::new(err))
        }
    }
    /// Obtains an authorization code either interactively or via HTTP redirect (see
    /// InstalledFlowReturnMethod).
    fn get_authorization_code<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
                                                                   auth_delegate: &mut AD,
                                                                   appsecret: &ApplicationSecret,
                                                                   scopes: S)
                                                                   -> Result<String, Box<Error>>
        where T: AsRef<str> + 'a,
              S: Iterator<Item = &'a T>
    {
        let result: Result<String, Box<Error>> = match self.server {
            // Interactive mode: user copies the code out of their browser.
            None => {
                let url = build_authentication_request_url(&appsecret.auth_uri,
                                                           &appsecret.client_id,
                                                           scopes,
                                                           None);
                match auth_delegate.present_user_url(&url, true /* need_code */) {
                    None => {
                        Result::Err(Box::new(io::Error::new(io::ErrorKind::UnexpectedEof,
                                                            "couldn't read code")))
                    }
                    // Remove newline
                    Some(mut code) => {
                        code.pop();
                        Result::Ok(code)
                    }
                }
            }
            // Redirect mode: block until the local server hands us the code.
            Some(_) => {
                // The redirect URI must be this very localhost URL, otherwise Google refuses
                // authorization.
                let url = build_authentication_request_url(&appsecret.auth_uri,
                                                           &appsecret.client_id,
                                                           scopes,
                                                           Some(format!("http://localhost:{}",
                                                                        self.port
                                                                            .unwrap_or(8080))));
                auth_delegate.present_user_url(&url, false /* need_code */);
                match self.auth_code_rcv.as_ref().unwrap().recv() {
                    Result::Err(e) => Result::Err(Box::new(e)),
                    Result::Ok(s) => Result::Ok(s),
                }
            }
        };
        // Best-effort shutdown of the redirect server; the result of close()
        // is deliberately discarded.
        self.server.as_mut().map(|l| l.close()).is_some();
        result
    }
    /// Sends the authorization code to the provider in order to obtain access and refresh tokens.
    fn request_token(&mut self,
                     appsecret: &ApplicationSecret,
                     authcode: &str)
                     -> Result<JSONTokenResponse, Box<Error>> {
        // The redirect_uri must match the one used to obtain the code.
        let redirect_uri;
        match self.port {
            None => redirect_uri = OOB_REDIRECT_URI.to_string(),
            Some(p) => redirect_uri = format!("http://localhost:{}", p),
        }
        let body = form_urlencoded::serialize(vec![("code".to_string(), authcode.to_string()),
                                                   ("client_id".to_string(),
                                                    appsecret.client_id.clone()),
                                                   ("client_secret".to_string(),
                                                    appsecret.client_secret.clone()),
                                                   ("redirect_uri".to_string(), redirect_uri),
                                                   ("grant_type".to_string(),
                                                    "authorization_code".to_string())]);
        let result: Result<client::Response, hyper::Error> = self.client
            .borrow_mut()
            .post(&appsecret.token_uri)
            .body(&body)
            .header(header::ContentType("application/x-www-form-urlencoded".parse().unwrap()))
            .send();
        // Read the whole response body before attempting to deserialize it.
        let mut resp = String::new();
        match result {
            Result::Err(e) => return Result::Err(Box::new(e)),
            Result::Ok(mut response) => {
                let result = response.read_to_string(&mut resp);
                match result {
                    Result::Err(e) => return Result::Err(Box::new(e)),
                    Result::Ok(_) => (),
                }
            }
        }
        let token_resp: Result<JSONTokenResponse, error::Error> = serde_json::from_str(&resp);
        match token_resp {
            Result::Err(e) => return Result::Err(Box::new(e)),
            Result::Ok(tok) => Result::Ok(tok) as Result<JSONTokenResponse, Box<Error>>,
        }
    }
}
/// Deserialized body of the token-endpoint response. On success the token
/// fields are populated; on failure only `error`/`error_description` are.
#[derive(Deserialize)]
struct JSONTokenResponse {
    access_token: Option<String>,
    refresh_token: Option<String>,
    token_type: Option<String>,
    expires_in: Option<i64>,
    error: Option<String>,
    error_description: Option<String>,
}
/// HTTP handler handling the redirect from the provider.
struct InstalledFlowHandler {
    // Mutex because hyper may invoke the handler from multiple threads;
    // the receiver end lives in InstalledFlow::auth_code_rcv.
    auth_code_snd: Mutex<Sender<String>>,
}
impl server::Handler for InstalledFlowHandler {
fn handle(&self, rq: server::Request, mut rp: server::Response) {
match rq.uri {
uri::RequestUri::AbsolutePath(path) => |
_ => {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Invalid Request!".as_ref());
}
}
}
}
impl InstalledFlowHandler {
    /// Extracts the `code` query parameter from the redirect URL and sends it
    /// to the channel `get_authorization_code()` is blocked on.
    fn handle_url(&self, url: hyper::Url) {
        // Google redirects to the specified localhost URL, appending the authorization
        // code, like this: http://localhost:8080/xyz/?code=4/731fJ3BheyCouCniPufAd280GHNV5Ju35yYcGs
        for (param, val) in url.query_pairs().into_owned() {
            // `String: PartialEq<&str>` — comparing against the literal avoids
            // the per-pair allocation the original `"code".to_string()` made.
            if param == "code" {
                // The receiver may already be gone (flow aborted or finished);
                // a failed send is harmless, so the result is ignored.
                let _ = self.auth_code_snd.lock().unwrap().send(val);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::build_authentication_request_url;
    use super::InstalledFlowHandler;
    use std::sync::Mutex;
    use std::sync::mpsc::channel;
    use hyper::Url;
    // Pins the exact URL layout: space in the scope list becomes %20, while
    // '?', '&' and '=' pass through unencoded.
    #[test]
    fn test_request_url_builder() {
        assert_eq!("https://accounts.google.\
                    com/o/oauth2/auth?scope=email%20profile&redirect_uri=urn:ietf:wg:oauth:2.0:\
                    oob&response_type=code&client_id=812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5amr\
                    f.apps.googleusercontent.com",
                   build_authentication_request_url("https://accounts.google.com/o/oauth2/auth",
                                                    "812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5am\
                                                     rf.apps.googleusercontent.com",
                                                    vec![&"email".to_string(),
                                                         &"profile".to_string()],
                                                    None));
    }
    // The handler must forward the decoded `code` value (%2F -> '/') through
    // the channel.
    #[test]
    fn test_http_handle_url() {
        let (tx, rx) = channel();
        let handler = InstalledFlowHandler { auth_code_snd: Mutex::new(tx) };
        // URLs are usually a bit botched
        let url = Url::parse("http://example.com:1234/?code=ab/c%2Fd#").unwrap();
        handler.handle_url(url);
        assert_eq!(rx.recv().unwrap(), "ab/c/d".to_string());
    }
}
| {
// We use a fake URL because the redirect goes to a URL, meaning we
// can't use the url form decode (because there's slashes and hashes and stuff in
// it).
let url = hyper::Url::parse(&format!("http://example.com{}", path));
if url.is_err() {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Unparseable URL".as_ref());
} else {
self.handle_url(url.unwrap());
*rp.status_mut() = status::StatusCode::Ok;
let _ =
rp.send("<html><head><title>Success</title></head><body>You may now \
close this window.</body></html>"
.as_ref());
}
} | conditional_block |
installed.rs | // Copyright (c) 2016 Google Inc ([email protected]).
//
// Refer to the project root for licensing information.
//
extern crate serde_json;
extern crate url;
use std::borrow::BorrowMut;
use std::convert::AsRef;
use std::error::Error;
use std::io;
use std::io::Read;
use std::sync::Mutex;
use std::sync::mpsc::{channel, Receiver, Sender};
use hyper;
use hyper::{client, header, server, status, uri};
use serde_json::error;
use url::form_urlencoded;
use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
use types::{ApplicationSecret, Token};
use authenticator_delegate::AuthenticatorDelegate;
const OOB_REDIRECT_URI: &'static str = "urn:ietf:wg:oauth:2.0:oob";
/// Assembles a URL to request an authorization token (with user interaction).
/// Note that the redirect_uri here has to be either None or some variation of
/// http://localhost:{port}, or the authorization won't work (error "redirect_uri_mismatch")
fn build_authentication_request_url<'a, T, I>(auth_uri: &str,
client_id: &str,
scopes: I,
redirect_uri: Option<String>)
-> String
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
let mut url = String::new();
let mut scopes_string = scopes.into_iter().fold(String::new(), |mut acc, sc| {
acc.push_str(sc.as_ref());
acc.push_str(" ");
acc
});
// Remove last space
scopes_string.pop();
url.push_str(auth_uri);
vec![format!("?scope={}", scopes_string),
format!("&redirect_uri={}",
redirect_uri.unwrap_or(OOB_REDIRECT_URI.to_string())),
format!("&response_type=code"),
format!("&client_id={}", client_id)]
.into_iter()
.fold(url, |mut u, param| {
u.push_str(&percent_encode(param.as_ref(), QUERY_ENCODE_SET));
u
})
}
pub struct InstalledFlow<C> {
client: C,
server: Option<server::Listening>,
port: Option<u32>,
auth_code_rcv: Option<Receiver<String>>,
}
/// cf. https://developers.google.com/identity/protocols/OAuth2InstalledApp#choosingredirecturi
pub enum InstalledFlowReturnMethod {
/// Involves showing a URL to the user and asking to copy a code from their browser
/// (default)
Interactive,
/// Involves spinning up a local HTTP server and Google redirecting the browser to
/// the server with a URL containing the code (preferred, but not as reliable). The
/// parameter is the port to listen on.
HTTPRedirect(u32),
}
impl<C> InstalledFlow<C>
where C: BorrowMut<hyper::Client>
{
/// Starts a new Installed App auth flow.
/// If HTTPRedirect is chosen as method and the server can't be started, the flow falls
/// back to Interactive.
pub fn new(client: C, method: Option<InstalledFlowReturnMethod>) -> InstalledFlow<C> {
let default = InstalledFlow {
client: client,
server: None,
port: None,
auth_code_rcv: None,
};
match method {
None => default,
Some(InstalledFlowReturnMethod::Interactive) => default,
// Start server on localhost to accept auth code.
Some(InstalledFlowReturnMethod::HTTPRedirect(port)) => {
let server = server::Server::http(format!("127.0.0.1:{}", port).as_str());
match server {
Result::Err(_) => default,
Result::Ok(server) => {
let (tx, rx) = channel();
let listening =
server.handle(InstalledFlowHandler { auth_code_snd: Mutex::new(tx) });
match listening {
Result::Err(_) => default,
Result::Ok(listening) => {
InstalledFlow {
client: default.client,
server: Some(listening),
port: Some(port),
auth_code_rcv: Some(rx),
}
}
}
}
}
}
}
}
/// Handles the token request flow; it consists of the following steps:
///. Obtain a auhorization code with user cooperation or internal redirect.
///. Obtain a token and refresh token using that code.
///. Return that token
///
/// It's recommended not to use the DefaultAuthenticatorDelegate, but a specialized one.
pub fn obtain_token<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
auth_delegate: &mut AD,
appsecret: &ApplicationSecret,
scopes: S)
-> Result<Token, Box<Error>>
where T: AsRef<str> + 'a,
S: Iterator<Item = &'a T>
{
let authcode = try!(self.get_authorization_code(auth_delegate, &appsecret, scopes));
let tokens = try!(self.request_token(&appsecret, &authcode));
// Successful response
if tokens.access_token.is_some() {
let mut token = Token {
access_token: tokens.access_token.unwrap(),
refresh_token: tokens.refresh_token.unwrap(),
token_type: tokens.token_type.unwrap(),
expires_in: tokens.expires_in,
expires_in_timestamp: None,
};
token.set_expiry_absolute();
Result::Ok(token)
} else {
let err = io::Error::new(io::ErrorKind::Other,
format!("Token API error: {} {}",
tokens.error.unwrap_or("<unknown err>".to_string()),
tokens.error_description
.unwrap_or("".to_string()))
.as_str());
Result::Err(Box::new(err))
}
}
/// Obtains an authorization code either interactively or via HTTP redirect (see
/// InstalledFlowReturnMethod).
fn get_authorization_code<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
auth_delegate: &mut AD,
appsecret: &ApplicationSecret,
scopes: S)
-> Result<String, Box<Error>>
where T: AsRef<str> + 'a,
S: Iterator<Item = &'a T>
{
let result: Result<String, Box<Error>> = match self.server {
None => {
let url = build_authentication_request_url(&appsecret.auth_uri,
&appsecret.client_id,
scopes,
None);
match auth_delegate.present_user_url(&url, true /* need_code */) {
None => {
Result::Err(Box::new(io::Error::new(io::ErrorKind::UnexpectedEof,
"couldn't read code")))
}
// Remove newline
Some(mut code) => {
code.pop();
Result::Ok(code)
}
}
}
Some(_) => {
// The redirect URI must be this very localhost URL, otherwise Google refuses
// authorization.
let url = build_authentication_request_url(&appsecret.auth_uri,
&appsecret.client_id,
scopes,
Some(format!("http://localhost:{}",
self.port
.unwrap_or(8080))));
auth_delegate.present_user_url(&url, false /* need_code */);
match self.auth_code_rcv.as_ref().unwrap().recv() {
Result::Err(e) => Result::Err(Box::new(e)),
Result::Ok(s) => Result::Ok(s),
}
}
};
self.server.as_mut().map(|l| l.close()).is_some();
result
}
/// Sends the authorization code to the provider in order to obtain access and refresh tokens.
fn request_token(&mut self,
appsecret: &ApplicationSecret,
authcode: &str)
-> Result<JSONTokenResponse, Box<Error>> {
let redirect_uri;
match self.port {
None => redirect_uri = OOB_REDIRECT_URI.to_string(),
Some(p) => redirect_uri = format!("http://localhost:{}", p),
}
let body = form_urlencoded::serialize(vec![("code".to_string(), authcode.to_string()),
("client_id".to_string(),
appsecret.client_id.clone()),
("client_secret".to_string(),
appsecret.client_secret.clone()),
("redirect_uri".to_string(), redirect_uri),
("grant_type".to_string(),
"authorization_code".to_string())]);
let result: Result<client::Response, hyper::Error> = self.client
.borrow_mut()
.post(&appsecret.token_uri)
.body(&body)
.header(header::ContentType("application/x-www-form-urlencoded".parse().unwrap()))
.send();
let mut resp = String::new();
match result {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(mut response) => {
let result = response.read_to_string(&mut resp);
match result {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(_) => (),
}
}
}
let token_resp: Result<JSONTokenResponse, error::Error> = serde_json::from_str(&resp);
match token_resp {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(tok) => Result::Ok(tok) as Result<JSONTokenResponse, Box<Error>>,
}
}
}
#[derive(Deserialize)]
struct JSONTokenResponse {
access_token: Option<String>,
refresh_token: Option<String>,
token_type: Option<String>,
expires_in: Option<i64>,
error: Option<String>,
error_description: Option<String>,
}
/// HTTP handler handling the redirect from the provider. | impl server::Handler for InstalledFlowHandler {
fn handle(&self, rq: server::Request, mut rp: server::Response) {
match rq.uri {
uri::RequestUri::AbsolutePath(path) => {
// We use a fake URL because the redirect goes to a URL, meaning we
// can't use the url form decode (because there's slashes and hashes and stuff in
// it).
let url = hyper::Url::parse(&format!("http://example.com{}", path));
if url.is_err() {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Unparseable URL".as_ref());
} else {
self.handle_url(url.unwrap());
*rp.status_mut() = status::StatusCode::Ok;
let _ =
rp.send("<html><head><title>Success</title></head><body>You may now \
close this window.</body></html>"
.as_ref());
}
}
_ => {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Invalid Request!".as_ref());
}
}
}
}
impl InstalledFlowHandler {
fn handle_url(&self, url: hyper::Url) {
// Google redirects to the specified localhost URL, appending the authorization
// code, like this: http://localhost:8080/xyz/?code=4/731fJ3BheyCouCniPufAd280GHNV5Ju35yYcGs
// We take that code and send it to the get_authorization_code() function that
// waits for it.
for (param, val) in url.query_pairs().into_owned() {
if param == "code".to_string() {
let _ = self.auth_code_snd.lock().unwrap().send(val);
}
}
}
}
#[cfg(test)]
mod tests {
use super::build_authentication_request_url;
use super::InstalledFlowHandler;
use std::sync::Mutex;
use std::sync::mpsc::channel;
use hyper::Url;
#[test]
fn test_request_url_builder() {
assert_eq!("https://accounts.google.\
com/o/oauth2/auth?scope=email%20profile&redirect_uri=urn:ietf:wg:oauth:2.0:\
oob&response_type=code&client_id=812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5amr\
f.apps.googleusercontent.com",
build_authentication_request_url("https://accounts.google.com/o/oauth2/auth",
"812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5am\
rf.apps.googleusercontent.com",
vec![&"email".to_string(),
&"profile".to_string()],
None));
}
#[test]
fn test_http_handle_url() {
let (tx, rx) = channel();
let handler = InstalledFlowHandler { auth_code_snd: Mutex::new(tx) };
// URLs are usually a bit botched
let url = Url::parse("http://example.com:1234/?code=ab/c%2Fd#").unwrap();
handler.handle_url(url);
assert_eq!(rx.recv().unwrap(), "ab/c/d".to_string());
}
} | struct InstalledFlowHandler {
auth_code_snd: Mutex<Sender<String>>,
}
| random_line_split |
installed.rs | // Copyright (c) 2016 Google Inc ([email protected]).
//
// Refer to the project root for licensing information.
//
extern crate serde_json;
extern crate url;
use std::borrow::BorrowMut;
use std::convert::AsRef;
use std::error::Error;
use std::io;
use std::io::Read;
use std::sync::Mutex;
use std::sync::mpsc::{channel, Receiver, Sender};
use hyper;
use hyper::{client, header, server, status, uri};
use serde_json::error;
use url::form_urlencoded;
use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
use types::{ApplicationSecret, Token};
use authenticator_delegate::AuthenticatorDelegate;
const OOB_REDIRECT_URI: &'static str = "urn:ietf:wg:oauth:2.0:oob";
/// Assembles a URL to request an authorization token (with user interaction).
/// Note that the redirect_uri here has to be either None or some variation of
/// http://localhost:{port}, or the authorization won't work (error "redirect_uri_mismatch")
fn build_authentication_request_url<'a, T, I>(auth_uri: &str,
client_id: &str,
scopes: I,
redirect_uri: Option<String>)
-> String
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
let mut url = String::new();
let mut scopes_string = scopes.into_iter().fold(String::new(), |mut acc, sc| {
acc.push_str(sc.as_ref());
acc.push_str(" ");
acc
});
// Remove last space
scopes_string.pop();
url.push_str(auth_uri);
vec![format!("?scope={}", scopes_string),
format!("&redirect_uri={}",
redirect_uri.unwrap_or(OOB_REDIRECT_URI.to_string())),
format!("&response_type=code"),
format!("&client_id={}", client_id)]
.into_iter()
.fold(url, |mut u, param| {
u.push_str(&percent_encode(param.as_ref(), QUERY_ENCODE_SET));
u
})
}
pub struct InstalledFlow<C> {
client: C,
server: Option<server::Listening>,
port: Option<u32>,
auth_code_rcv: Option<Receiver<String>>,
}
/// cf. https://developers.google.com/identity/protocols/OAuth2InstalledApp#choosingredirecturi
pub enum InstalledFlowReturnMethod {
/// Involves showing a URL to the user and asking to copy a code from their browser
/// (default)
Interactive,
/// Involves spinning up a local HTTP server and Google redirecting the browser to
/// the server with a URL containing the code (preferred, but not as reliable). The
/// parameter is the port to listen on.
HTTPRedirect(u32),
}
impl<C> InstalledFlow<C>
where C: BorrowMut<hyper::Client>
{
/// Starts a new Installed App auth flow.
/// If HTTPRedirect is chosen as method and the server can't be started, the flow falls
/// back to Interactive.
pub fn new(client: C, method: Option<InstalledFlowReturnMethod>) -> InstalledFlow<C> |
match listening {
Result::Err(_) => default,
Result::Ok(listening) => {
InstalledFlow {
client: default.client,
server: Some(listening),
port: Some(port),
auth_code_rcv: Some(rx),
}
}
}
}
}
}
}
}
/// Handles the token request flow; it consists of the following steps:
///. Obtain a auhorization code with user cooperation or internal redirect.
///. Obtain a token and refresh token using that code.
///. Return that token
///
/// It's recommended not to use the DefaultAuthenticatorDelegate, but a specialized one.
pub fn obtain_token<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
auth_delegate: &mut AD,
appsecret: &ApplicationSecret,
scopes: S)
-> Result<Token, Box<Error>>
where T: AsRef<str> + 'a,
S: Iterator<Item = &'a T>
{
let authcode = try!(self.get_authorization_code(auth_delegate, &appsecret, scopes));
let tokens = try!(self.request_token(&appsecret, &authcode));
// Successful response
if tokens.access_token.is_some() {
let mut token = Token {
access_token: tokens.access_token.unwrap(),
refresh_token: tokens.refresh_token.unwrap(),
token_type: tokens.token_type.unwrap(),
expires_in: tokens.expires_in,
expires_in_timestamp: None,
};
token.set_expiry_absolute();
Result::Ok(token)
} else {
let err = io::Error::new(io::ErrorKind::Other,
format!("Token API error: {} {}",
tokens.error.unwrap_or("<unknown err>".to_string()),
tokens.error_description
.unwrap_or("".to_string()))
.as_str());
Result::Err(Box::new(err))
}
}
/// Obtains an authorization code either interactively or via HTTP redirect (see
/// InstalledFlowReturnMethod).
fn get_authorization_code<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
auth_delegate: &mut AD,
appsecret: &ApplicationSecret,
scopes: S)
-> Result<String, Box<Error>>
where T: AsRef<str> + 'a,
S: Iterator<Item = &'a T>
{
let result: Result<String, Box<Error>> = match self.server {
None => {
let url = build_authentication_request_url(&appsecret.auth_uri,
&appsecret.client_id,
scopes,
None);
match auth_delegate.present_user_url(&url, true /* need_code */) {
None => {
Result::Err(Box::new(io::Error::new(io::ErrorKind::UnexpectedEof,
"couldn't read code")))
}
// Remove newline
Some(mut code) => {
code.pop();
Result::Ok(code)
}
}
}
Some(_) => {
// The redirect URI must be this very localhost URL, otherwise Google refuses
// authorization.
let url = build_authentication_request_url(&appsecret.auth_uri,
&appsecret.client_id,
scopes,
Some(format!("http://localhost:{}",
self.port
.unwrap_or(8080))));
auth_delegate.present_user_url(&url, false /* need_code */);
match self.auth_code_rcv.as_ref().unwrap().recv() {
Result::Err(e) => Result::Err(Box::new(e)),
Result::Ok(s) => Result::Ok(s),
}
}
};
self.server.as_mut().map(|l| l.close()).is_some();
result
}
/// Sends the authorization code to the provider in order to obtain access and refresh tokens.
fn request_token(&mut self,
appsecret: &ApplicationSecret,
authcode: &str)
-> Result<JSONTokenResponse, Box<Error>> {
let redirect_uri;
match self.port {
None => redirect_uri = OOB_REDIRECT_URI.to_string(),
Some(p) => redirect_uri = format!("http://localhost:{}", p),
}
let body = form_urlencoded::serialize(vec![("code".to_string(), authcode.to_string()),
("client_id".to_string(),
appsecret.client_id.clone()),
("client_secret".to_string(),
appsecret.client_secret.clone()),
("redirect_uri".to_string(), redirect_uri),
("grant_type".to_string(),
"authorization_code".to_string())]);
let result: Result<client::Response, hyper::Error> = self.client
.borrow_mut()
.post(&appsecret.token_uri)
.body(&body)
.header(header::ContentType("application/x-www-form-urlencoded".parse().unwrap()))
.send();
let mut resp = String::new();
match result {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(mut response) => {
let result = response.read_to_string(&mut resp);
match result {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(_) => (),
}
}
}
let token_resp: Result<JSONTokenResponse, error::Error> = serde_json::from_str(&resp);
match token_resp {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(tok) => Result::Ok(tok) as Result<JSONTokenResponse, Box<Error>>,
}
}
}
#[derive(Deserialize)]
struct JSONTokenResponse {
access_token: Option<String>,
refresh_token: Option<String>,
token_type: Option<String>,
expires_in: Option<i64>,
error: Option<String>,
error_description: Option<String>,
}
/// HTTP handler handling the redirect from the provider.
struct InstalledFlowHandler {
auth_code_snd: Mutex<Sender<String>>,
}
impl server::Handler for InstalledFlowHandler {
fn handle(&self, rq: server::Request, mut rp: server::Response) {
match rq.uri {
uri::RequestUri::AbsolutePath(path) => {
// We use a fake URL because the redirect goes to a URL, meaning we
// can't use the url form decode (because there's slashes and hashes and stuff in
// it).
let url = hyper::Url::parse(&format!("http://example.com{}", path));
if url.is_err() {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Unparseable URL".as_ref());
} else {
self.handle_url(url.unwrap());
*rp.status_mut() = status::StatusCode::Ok;
let _ =
rp.send("<html><head><title>Success</title></head><body>You may now \
close this window.</body></html>"
.as_ref());
}
}
_ => {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Invalid Request!".as_ref());
}
}
}
}
impl InstalledFlowHandler {
fn handle_url(&self, url: hyper::Url) {
// Google redirects to the specified localhost URL, appending the authorization
// code, like this: http://localhost:8080/xyz/?code=4/731fJ3BheyCouCniPufAd280GHNV5Ju35yYcGs
// We take that code and send it to the get_authorization_code() function that
// waits for it.
for (param, val) in url.query_pairs().into_owned() {
if param == "code".to_string() {
let _ = self.auth_code_snd.lock().unwrap().send(val);
}
}
}
}
#[cfg(test)]
mod tests {
use super::build_authentication_request_url;
use super::InstalledFlowHandler;
use std::sync::Mutex;
use std::sync::mpsc::channel;
use hyper::Url;
#[test]
fn test_request_url_builder() {
assert_eq!("https://accounts.google.\
com/o/oauth2/auth?scope=email%20profile&redirect_uri=urn:ietf:wg:oauth:2.0:\
oob&response_type=code&client_id=812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5amr\
f.apps.googleusercontent.com",
build_authentication_request_url("https://accounts.google.com/o/oauth2/auth",
"812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5am\
rf.apps.googleusercontent.com",
vec![&"email".to_string(),
&"profile".to_string()],
None));
}
#[test]
fn test_http_handle_url() {
let (tx, rx) = channel();
let handler = InstalledFlowHandler { auth_code_snd: Mutex::new(tx) };
// URLs are usually a bit botched
let url = Url::parse("http://example.com:1234/?code=ab/c%2Fd#").unwrap();
handler.handle_url(url);
assert_eq!(rx.recv().unwrap(), "ab/c/d".to_string());
}
}
| {
let default = InstalledFlow {
client: client,
server: None,
port: None,
auth_code_rcv: None,
};
match method {
None => default,
Some(InstalledFlowReturnMethod::Interactive) => default,
// Start server on localhost to accept auth code.
Some(InstalledFlowReturnMethod::HTTPRedirect(port)) => {
let server = server::Server::http(format!("127.0.0.1:{}", port).as_str());
match server {
Result::Err(_) => default,
Result::Ok(server) => {
let (tx, rx) = channel();
let listening =
server.handle(InstalledFlowHandler { auth_code_snd: Mutex::new(tx) }); | identifier_body |
installed.rs | // Copyright (c) 2016 Google Inc ([email protected]).
//
// Refer to the project root for licensing information.
//
extern crate serde_json;
extern crate url;
use std::borrow::BorrowMut;
use std::convert::AsRef;
use std::error::Error;
use std::io;
use std::io::Read;
use std::sync::Mutex;
use std::sync::mpsc::{channel, Receiver, Sender};
use hyper;
use hyper::{client, header, server, status, uri};
use serde_json::error;
use url::form_urlencoded;
use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
use types::{ApplicationSecret, Token};
use authenticator_delegate::AuthenticatorDelegate;
const OOB_REDIRECT_URI: &'static str = "urn:ietf:wg:oauth:2.0:oob";
/// Assembles a URL to request an authorization token (with user interaction).
/// Note that the redirect_uri here has to be either None or some variation of
/// http://localhost:{port}, or the authorization won't work (error "redirect_uri_mismatch")
fn build_authentication_request_url<'a, T, I>(auth_uri: &str,
client_id: &str,
scopes: I,
redirect_uri: Option<String>)
-> String
where T: AsRef<str> + 'a,
I: IntoIterator<Item = &'a T>
{
let mut url = String::new();
let mut scopes_string = scopes.into_iter().fold(String::new(), |mut acc, sc| {
acc.push_str(sc.as_ref());
acc.push_str(" ");
acc
});
// Remove last space
scopes_string.pop();
url.push_str(auth_uri);
vec![format!("?scope={}", scopes_string),
format!("&redirect_uri={}",
redirect_uri.unwrap_or(OOB_REDIRECT_URI.to_string())),
format!("&response_type=code"),
format!("&client_id={}", client_id)]
.into_iter()
.fold(url, |mut u, param| {
u.push_str(&percent_encode(param.as_ref(), QUERY_ENCODE_SET));
u
})
}
pub struct InstalledFlow<C> {
client: C,
server: Option<server::Listening>,
port: Option<u32>,
auth_code_rcv: Option<Receiver<String>>,
}
/// cf. https://developers.google.com/identity/protocols/OAuth2InstalledApp#choosingredirecturi
pub enum | {
/// Involves showing a URL to the user and asking to copy a code from their browser
/// (default)
Interactive,
/// Involves spinning up a local HTTP server and Google redirecting the browser to
/// the server with a URL containing the code (preferred, but not as reliable). The
/// parameter is the port to listen on.
HTTPRedirect(u32),
}
impl<C> InstalledFlow<C>
where C: BorrowMut<hyper::Client>
{
/// Starts a new Installed App auth flow.
/// If HTTPRedirect is chosen as method and the server can't be started, the flow falls
/// back to Interactive.
pub fn new(client: C, method: Option<InstalledFlowReturnMethod>) -> InstalledFlow<C> {
let default = InstalledFlow {
client: client,
server: None,
port: None,
auth_code_rcv: None,
};
match method {
None => default,
Some(InstalledFlowReturnMethod::Interactive) => default,
// Start server on localhost to accept auth code.
Some(InstalledFlowReturnMethod::HTTPRedirect(port)) => {
let server = server::Server::http(format!("127.0.0.1:{}", port).as_str());
match server {
Result::Err(_) => default,
Result::Ok(server) => {
let (tx, rx) = channel();
let listening =
server.handle(InstalledFlowHandler { auth_code_snd: Mutex::new(tx) });
match listening {
Result::Err(_) => default,
Result::Ok(listening) => {
InstalledFlow {
client: default.client,
server: Some(listening),
port: Some(port),
auth_code_rcv: Some(rx),
}
}
}
}
}
}
}
}
/// Handles the token request flow; it consists of the following steps:
///. Obtain a auhorization code with user cooperation or internal redirect.
///. Obtain a token and refresh token using that code.
///. Return that token
///
/// It's recommended not to use the DefaultAuthenticatorDelegate, but a specialized one.
pub fn obtain_token<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
auth_delegate: &mut AD,
appsecret: &ApplicationSecret,
scopes: S)
-> Result<Token, Box<Error>>
where T: AsRef<str> + 'a,
S: Iterator<Item = &'a T>
{
let authcode = try!(self.get_authorization_code(auth_delegate, &appsecret, scopes));
let tokens = try!(self.request_token(&appsecret, &authcode));
// Successful response
if tokens.access_token.is_some() {
let mut token = Token {
access_token: tokens.access_token.unwrap(),
refresh_token: tokens.refresh_token.unwrap(),
token_type: tokens.token_type.unwrap(),
expires_in: tokens.expires_in,
expires_in_timestamp: None,
};
token.set_expiry_absolute();
Result::Ok(token)
} else {
let err = io::Error::new(io::ErrorKind::Other,
format!("Token API error: {} {}",
tokens.error.unwrap_or("<unknown err>".to_string()),
tokens.error_description
.unwrap_or("".to_string()))
.as_str());
Result::Err(Box::new(err))
}
}
/// Obtains an authorization code either interactively or via HTTP redirect (see
/// InstalledFlowReturnMethod).
fn get_authorization_code<'a, AD: AuthenticatorDelegate, S, T>(&mut self,
auth_delegate: &mut AD,
appsecret: &ApplicationSecret,
scopes: S)
-> Result<String, Box<Error>>
where T: AsRef<str> + 'a,
S: Iterator<Item = &'a T>
{
let result: Result<String, Box<Error>> = match self.server {
None => {
let url = build_authentication_request_url(&appsecret.auth_uri,
&appsecret.client_id,
scopes,
None);
match auth_delegate.present_user_url(&url, true /* need_code */) {
None => {
Result::Err(Box::new(io::Error::new(io::ErrorKind::UnexpectedEof,
"couldn't read code")))
}
// Remove newline
Some(mut code) => {
code.pop();
Result::Ok(code)
}
}
}
Some(_) => {
// The redirect URI must be this very localhost URL, otherwise Google refuses
// authorization.
let url = build_authentication_request_url(&appsecret.auth_uri,
&appsecret.client_id,
scopes,
Some(format!("http://localhost:{}",
self.port
.unwrap_or(8080))));
auth_delegate.present_user_url(&url, false /* need_code */);
match self.auth_code_rcv.as_ref().unwrap().recv() {
Result::Err(e) => Result::Err(Box::new(e)),
Result::Ok(s) => Result::Ok(s),
}
}
};
self.server.as_mut().map(|l| l.close()).is_some();
result
}
/// Sends the authorization code to the provider in order to obtain access and refresh tokens.
fn request_token(&mut self,
appsecret: &ApplicationSecret,
authcode: &str)
-> Result<JSONTokenResponse, Box<Error>> {
let redirect_uri;
match self.port {
None => redirect_uri = OOB_REDIRECT_URI.to_string(),
Some(p) => redirect_uri = format!("http://localhost:{}", p),
}
let body = form_urlencoded::serialize(vec![("code".to_string(), authcode.to_string()),
("client_id".to_string(),
appsecret.client_id.clone()),
("client_secret".to_string(),
appsecret.client_secret.clone()),
("redirect_uri".to_string(), redirect_uri),
("grant_type".to_string(),
"authorization_code".to_string())]);
let result: Result<client::Response, hyper::Error> = self.client
.borrow_mut()
.post(&appsecret.token_uri)
.body(&body)
.header(header::ContentType("application/x-www-form-urlencoded".parse().unwrap()))
.send();
let mut resp = String::new();
match result {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(mut response) => {
let result = response.read_to_string(&mut resp);
match result {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(_) => (),
}
}
}
let token_resp: Result<JSONTokenResponse, error::Error> = serde_json::from_str(&resp);
match token_resp {
Result::Err(e) => return Result::Err(Box::new(e)),
Result::Ok(tok) => Result::Ok(tok) as Result<JSONTokenResponse, Box<Error>>,
}
}
}
#[derive(Deserialize)]
struct JSONTokenResponse {
access_token: Option<String>,
refresh_token: Option<String>,
token_type: Option<String>,
expires_in: Option<i64>,
error: Option<String>,
error_description: Option<String>,
}
/// HTTP handler handling the redirect from the provider.
struct InstalledFlowHandler {
auth_code_snd: Mutex<Sender<String>>,
}
impl server::Handler for InstalledFlowHandler {
fn handle(&self, rq: server::Request, mut rp: server::Response) {
match rq.uri {
uri::RequestUri::AbsolutePath(path) => {
// We use a fake URL because the redirect goes to a URL, meaning we
// can't use the url form decode (because there's slashes and hashes and stuff in
// it).
let url = hyper::Url::parse(&format!("http://example.com{}", path));
if url.is_err() {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Unparseable URL".as_ref());
} else {
self.handle_url(url.unwrap());
*rp.status_mut() = status::StatusCode::Ok;
let _ =
rp.send("<html><head><title>Success</title></head><body>You may now \
close this window.</body></html>"
.as_ref());
}
}
_ => {
*rp.status_mut() = status::StatusCode::BadRequest;
let _ = rp.send("Invalid Request!".as_ref());
}
}
}
}
impl InstalledFlowHandler {
fn handle_url(&self, url: hyper::Url) {
// Google redirects to the specified localhost URL, appending the authorization
// code, like this: http://localhost:8080/xyz/?code=4/731fJ3BheyCouCniPufAd280GHNV5Ju35yYcGs
// We take that code and send it to the get_authorization_code() function that
// waits for it.
for (param, val) in url.query_pairs().into_owned() {
if param == "code".to_string() {
let _ = self.auth_code_snd.lock().unwrap().send(val);
}
}
}
}
#[cfg(test)]
mod tests {
use super::build_authentication_request_url;
use super::InstalledFlowHandler;
use std::sync::Mutex;
use std::sync::mpsc::channel;
use hyper::Url;
#[test]
fn test_request_url_builder() {
assert_eq!("https://accounts.google.\
com/o/oauth2/auth?scope=email%20profile&redirect_uri=urn:ietf:wg:oauth:2.0:\
oob&response_type=code&client_id=812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5amr\
f.apps.googleusercontent.com",
build_authentication_request_url("https://accounts.google.com/o/oauth2/auth",
"812741506391-h38jh0j4fv0ce1krdkiq0hfvt6n5am\
rf.apps.googleusercontent.com",
vec![&"email".to_string(),
&"profile".to_string()],
None));
}
#[test]
fn test_http_handle_url() {
let (tx, rx) = channel();
let handler = InstalledFlowHandler { auth_code_snd: Mutex::new(tx) };
// URLs are usually a bit botched
let url = Url::parse("http://example.com:1234/?code=ab/c%2Fd#").unwrap();
handler.handle_url(url);
assert_eq!(rx.recv().unwrap(), "ab/c/d".to_string());
}
}
| InstalledFlowReturnMethod | identifier_name |
patterns.rs | use insta::assert_snapshot;
use test_utils::mark;
use super::{infer, infer_with_mismatches};
#[test]
fn infer_pattern() {
assert_snapshot!(
infer(r#"
fn test(x: &i32) {
let y = x;
let &z = x;
let a = z;
let (c, d) = (1, "hello");
for (e, f) in some_iter {
let g = e;
}
if let [val] = opt {
let h = val;
}
let lambda = |a: u64, b, c: i32| { a + b; c };
let ref ref_to_x = x;
let mut mut_x = x;
let ref mut mut_ref_to_x = x;
let k = mut_ref_to_x;
}
"#),
@r###"
9..10 'x': &i32
18..369 '{ ...o_x; }': ()
28..29 'y': &i32
32..33 'x': &i32
43..45 '&z': &i32
44..45 'z': i32
48..49 'x': &i32
59..60 'a': i32
63..64 'z': i32
74..80 '(c, d)': (i32, &str)
75..76 'c': i32
78..79 'd': &str
83..95 '(1, "hello")': (i32, &str)
84..85 '1': i32
87..94 '"hello"': &str
102..152 'for (e... }': ()
106..112 '(e, f)': ({unknown}, {unknown})
107..108 'e': {unknown}
110..111 'f': {unknown}
116..125'some_iter': {unknown}
126..152 '{ ... }': ()
140..141 'g': {unknown}
144..145 'e': {unknown}
158..205 'if let... }': ()
165..170 '[val]': [{unknown}]
166..169 'val': {unknown}
173..176 'opt': [{unknown}]
177..205 '{ ... }': ()
191..192 'h': {unknown}
195..198 'val': {unknown}
215..221 'lambda': |u64, u64, i32| -> i32
224..256 '|a: u6...b; c }': |u64, u64, i32| -> i32
225..226 'a': u64
233..234 'b': u64
236..237 'c': i32
244..256 '{ a + b; c }': i32
246..247 'a': u64
246..251 'a + b': u64
250..251 'b': u64
253..254 'c': i32
267..279'ref ref_to_x': &&i32
282..283 'x': &i32
293..302'mut mut_x': &i32
305..306 'x': &i32
316..336'ref mu...f_to_x': &mut &i32
339..340 'x': &i32
350..351 'k': &mut &i32
354..366'mut_ref_to_x': &mut &i32
"###
);
}
#[test]
fn infer_literal_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
fn any<T>() -> T { loop {} }
fn test(x: &i32) {
if let "foo" = any() {}
if let 1 = any() {}
if let 1u32 = any() {}
if let 1f32 = any() {}
if let 1.0 = any() {}
if let true = any() {}
}
"#, true),
@r###"
18..29 '{ loop {} }': T
20..27 'loop {}':!
25..27 '{}': ()
38..39 'x': &i32
47..209 '{ ...) {} }': ()
53..76 'if let...y() {}': ()
60..65 '"foo"': &str
60..65 '"foo"': &str
68..71 'any': fn any<&str>() -> &str
68..73 'any()': &str
74..76 '{}': ()
81..100 'if let...y() {}': ()
88..89 '1': i32
88..89 '1': i32
92..95 'any': fn any<i32>() -> i32
92..97 'any()': i32
98..100 '{}': ()
105..127 'if let...y() {}': ()
112..116 '1u32': u32
112..116 '1u32': u32
119..122 'any': fn any<u32>() -> u32
119..124 'any()': u32
125..127 '{}': ()
132..154 'if let...y() {}': ()
139..143 '1f32': f32
139..143 '1f32': f32
146..149 'any': fn any<f32>() -> f32
146..151 'any()': f32
152..154 '{}': ()
159..180 'if let...y() {}': ()
166..169 '1.0': f64
166..169 '1.0': f64
172..175 'any': fn any<f64>() -> f64
172..177 'any()': f64
178..180 '{}': ()
185..207 'if let...y() {}': ()
192..196 'true': bool
192..196 'true': bool
199..202 'any': fn any<bool>() -> bool
199..204 'any()': bool
205..207 '{}': ()
"###
);
}
#[test]
fn infer_range_pattern() | );
}
#[test]
fn infer_pattern_match_ergonomics() {
assert_snapshot!(
infer(r#"
struct A<T>(T);
fn test() {
let A(n) = &A(1);
let A(n) = &mut A(1);
}
"#),
@r###"
28..79 '{ ...(1); }': ()
38..42 'A(n)': A<i32>
40..41 'n': &i32
45..50 '&A(1)': &A<i32>
46..47 'A': A<i32>(i32) -> A<i32>
46..50 'A(1)': A<i32>
48..49 '1': i32
60..64 'A(n)': A<i32>
62..63 'n': &mut i32
67..76 '&mut A(1)': &mut A<i32>
72..73 'A': A<i32>(i32) -> A<i32>
72..76 'A(1)': A<i32>
74..75 '1': i32
"###
);
}
#[test]
fn infer_pattern_match_ergonomics_ref() {
mark::check!(match_ergonomics_ref);
assert_snapshot!(
infer(r#"
fn test() {
let v = &(1, &2);
let (_, &w) = v;
}
"#),
@r###"
11..57 '{ ...= v; }': ()
21..22 'v': &(i32, &i32)
25..33 '&(1, &2)': &(i32, &i32)
26..33 '(1, &2)': (i32, &i32)
27..28 '1': i32
30..32 '&2': &i32
31..32 '2': i32
43..50 '(_, &w)': (i32, &i32)
44..45 '_': i32
47..49 '&w': &i32
48..49 'w': i32
53..54 'v': &(i32, &i32)
"###
);
}
#[test]
fn infer_pattern_match_slice() {
assert_snapshot!(
infer(r#"
fn test() {
let slice: &[f64] = &[0.0];
match slice {
&[] => {},
&[a] => {
a;
},
&[b, c] => {
b;
c;
}
_ => {}
}
}
"#),
@r###"
11..210 '{ ... } }': ()
21..26'slice': &[f64]
37..43 '&[0.0]': &[f64; _]
38..43 '[0.0]': [f64; _]
39..42 '0.0': f64
49..208'match... }': ()
55..60'slice': &[f64]
71..74 '&[]': &[f64]
72..74 '[]': [f64]
78..80 '{}': ()
90..94 '&[a]': &[f64]
91..94 '[a]': [f64]
92..93 'a': f64
98..124 '{ ... }': ()
112..113 'a': f64
134..141 '&[b, c]': &[f64]
135..141 '[b, c]': [f64]
136..137 'b': f64
139..140 'c': f64
145..186 '{ ... }': ()
159..160 'b': f64
174..175 'c': f64
195..196 '_': &[f64]
200..202 '{}': ()
"###
);
}
#[test]
fn infer_pattern_match_arr() {
assert_snapshot!(
infer(r#"
fn test() {
let arr: [f64; 2] = [0.0, 1.0];
match arr {
[1.0, a] => {
a;
},
[b, c] => {
b;
c;
}
}
}
"#),
@r###"
11..180 '{ ... } }': ()
21..24 'arr': [f64; _]
37..47 '[0.0, 1.0]': [f64; _]
38..41 '0.0': f64
43..46 '1.0': f64
53..178'match... }': ()
59..62 'arr': [f64; _]
73..81 '[1.0, a]': [f64; _]
74..77 '1.0': f64
74..77 '1.0': f64
79..80 'a': f64
85..111 '{ ... }': ()
99..100 'a': f64
121..127 '[b, c]': [f64; _]
122..123 'b': f64
125..126 'c': f64
131..172 '{ ... }': ()
145..146 'b': f64
160..161 'c': f64
"###
);
}
#[test]
fn infer_adt_pattern() {
assert_snapshot!(
infer(r#"
enum E {
A { x: usize },
B
}
struct S(u32, E);
fn test() {
let e = E::A { x: 3 };
let S(y, z) = foo;
let E::A { x: new_var } = e;
match e {
E::A { x } => x,
E::B if foo => 1,
E::B => 10,
};
let ref d @ E::A {.. } = e;
d;
}
"#),
@r###"
68..289 '{ ... d; }': ()
78..79 'e': E
82..95 'E::A { x: 3 }': E
92..93 '3': usize
106..113 'S(y, z)': S
108..109 'y': u32
111..112 'z': E
116..119 'foo': S
129..148 'E::A {..._var }': E
139..146 'new_var': usize
151..152 'e': E
159..245'match... }': usize
165..166 'e': E
177..187 'E::A { x }': E
184..185 'x': usize
191..192 'x': usize
202..206 'E::B': E
210..213 'foo': bool
217..218 '1': usize
228..232 'E::B': E
236..238 '10': usize
256..275'ref d...{.. }': &E
264..275 'E::A {.. }': E
278..279 'e': E
285..286 'd': &E
"###
);
}
#[test]
fn enum_variant_through_self_in_pattern() {
assert_snapshot!(
infer(r#"
enum E {
A { x: usize },
B(usize),
C
}
impl E {
fn test() {
match (loop {}) {
Self::A { x } => { x; },
Self::B(x) => { x; },
Self::C => {},
};
}
}
"#),
@r###"
76..218 '{ ... }': ()
86..211'match... }': ()
93..100 'loop {}':!
98..100 '{}': ()
116..129 'Self::A { x }': E
126..127 'x': usize
133..139 '{ x; }': ()
135..136 'x': usize
153..163 'Self::B(x)': E
161..162 'x': usize
167..173 '{ x; }': ()
169..170 'x': usize
187..194 'Self::C': E
198..200 '{}': ()
"###
);
}
#[test]
fn infer_generics_in_patterns() {
assert_snapshot!(
infer(r#"
struct A<T> {
x: T,
}
enum Option<T> {
Some(T),
None,
}
fn test(a1: A<u32>, o: Option<u64>) {
let A { x: x2 } = a1;
let A::<i64> { x: x3 } = A { x: 1 };
match o {
Option::Some(t) => t,
_ => 1,
};
}
"#),
@r###"
79..81 'a1': A<u32>
91..92 'o': Option<u64>
107..244 '{ ... }; }': ()
117..128 'A { x: x2 }': A<u32>
124..126 'x2': u32
131..133 'a1': A<u32>
143..161 'A::<i6...: x3 }': A<i64>
157..159 'x3': i64
164..174 'A { x: 1 }': A<i64>
171..172 '1': i64
180..241'match... }': u64
186..187 'o': Option<u64>
198..213 'Option::Some(t)': Option<u64>
211..212 't': u64
217..218 't': u64
228..229 '_': Option<u64>
233..234 '1': u64
"###
);
}
#[test]
fn infer_const_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
enum Option<T> { None }
use Option::None;
struct Foo;
const Bar: usize = 1;
fn test() {
let a: Option<u32> = None;
let b: Option<i64> = match a {
None => None,
};
let _: () = match () { Foo => Foo }; // Expected mismatch
let _: () = match () { Bar => Bar }; // Expected mismatch
}
"#, true),
@r###"
74..75 '1': usize
88..310 '{ ...atch }': ()
98..99 'a': Option<u32>
115..119 'None': Option<u32>
129..130 'b': Option<i64>
146..183'match... }': Option<i64>
152..153 'a': Option<u32>
164..168 'None': Option<u32>
172..176 'None': Option<i64>
193..194 '_': ()
201..224'match... Foo }': Foo
207..209 '()': ()
212..215 'Foo': Foo
219..222 'Foo': Foo
255..256 '_': ()
263..286'match... Bar }': usize
269..271 '()': ()
274..277 'Bar': usize
281..284 'Bar': usize
201..224: expected (), got Foo
263..286: expected (), got usize
"###
);
}
#[test]
fn infer_guard() {
assert_snapshot!(
infer(r#"
struct S;
impl S { fn foo(&self) -> bool { false } }
fn main() {
match S {
s if s.foo() => (),
}
}
"#), @"
28..32'self': &S
42..51 '{ false }': bool
44..49 'false': bool
65..116 '{ ... } }': ()
71..114'match... }': ()
77..78 'S': S
89..90's': S
94..95's': S
94..101's.foo()': bool
105..107 '()': ()
")
}
#[test]
fn match_ergonomics_in_closure_params() {
assert_snapshot!(
infer(r#"
#[lang = "fn_once"]
trait FnOnce<Args> {
type Output;
}
fn foo<T, U, F: FnOnce(T) -> U>(t: T, f: F) -> U { loop {} }
fn test() {
foo(&(1, "a"), |&(x, y)| x); // normal, no match ergonomics
foo(&(1, "a"), |(x, y)| x);
}
"#),
@r###"
94..95 't': T
100..101 'f': F
111..122 '{ loop {} }': U
113..120 'loop {}':!
118..120 '{}': ()
134..233 '{ ... x); }': ()
140..143 'foo': fn foo<&(i32, &str), i32, |&(i32, &str)| -> i32>(&(i32, &str), |&(i32, &str)| -> i32) -> i32
140..167 'foo(&(...y)| x)': i32
144..153 '&(1, "a")': &(i32, &str)
145..153 '(1, "a")': (i32, &str)
146..147 '1': i32
149..152 '"a"': &str
155..166 '|&(x, y)| x': |&(i32, &str)| -> i32
156..163 '&(x, y)': &(i32, &str)
157..163 '(x, y)': (i32, &str)
158..159 'x': i32
161..162 'y': &str
165..166 'x': i32
204..207 'foo': fn foo<&(i32, &str), &i32, |&(i32, &str)| -> &i32>(&(i32, &str), |&(i32, &str)| -> &i32) -> &i32
204..230 'foo(&(...y)| x)': &i32
208..217 '&(1, "a")': &(i32, &str)
209..217 '(1, "a")': (i32, &str)
210..211 '1': i32
213..216 '"a"': &str
219..229 '|(x, y)| x': |&(i32, &str)| -> &i32
220..226 '(x, y)': (i32, &str)
221..222 'x': &i32
224..225 'y': &&str
228..229 'x': &i32
"###
);
}
| {
assert_snapshot!(
infer_with_mismatches(r#"
fn test(x: &i32) {
if let 1..76 = 2u32 {}
if let 1..=76 = 2u32 {}
}
"#, true),
@r###"
9..10 'x': &i32
18..76 '{ ...2 {} }': ()
24..46 'if let...u32 {}': ()
31..36 '1..76': u32
39..43 '2u32': u32
44..46 '{}': ()
51..74 'if let...u32 {}': ()
58..64 '1..=76': u32
67..71 '2u32': u32
72..74 '{}': ()
"### | identifier_body |
patterns.rs | use insta::assert_snapshot;
use test_utils::mark;
use super::{infer, infer_with_mismatches};
#[test]
fn infer_pattern() {
assert_snapshot!(
infer(r#"
fn test(x: &i32) {
let y = x;
let &z = x;
let a = z;
let (c, d) = (1, "hello");
for (e, f) in some_iter {
let g = e;
}
if let [val] = opt {
let h = val;
}
let lambda = |a: u64, b, c: i32| { a + b; c };
let ref ref_to_x = x;
let mut mut_x = x;
let ref mut mut_ref_to_x = x;
let k = mut_ref_to_x;
}
"#),
@r###"
9..10 'x': &i32
18..369 '{ ...o_x; }': ()
28..29 'y': &i32
32..33 'x': &i32
43..45 '&z': &i32
44..45 'z': i32
48..49 'x': &i32
59..60 'a': i32
63..64 'z': i32
74..80 '(c, d)': (i32, &str)
75..76 'c': i32
78..79 'd': &str
83..95 '(1, "hello")': (i32, &str)
84..85 '1': i32
87..94 '"hello"': &str
102..152 'for (e... }': ()
106..112 '(e, f)': ({unknown}, {unknown})
107..108 'e': {unknown}
110..111 'f': {unknown}
116..125'some_iter': {unknown}
126..152 '{ ... }': ()
140..141 'g': {unknown}
144..145 'e': {unknown}
158..205 'if let... }': ()
165..170 '[val]': [{unknown}]
166..169 'val': {unknown}
173..176 'opt': [{unknown}]
177..205 '{ ... }': ()
191..192 'h': {unknown}
195..198 'val': {unknown}
215..221 'lambda': |u64, u64, i32| -> i32
224..256 '|a: u6...b; c }': |u64, u64, i32| -> i32
225..226 'a': u64
233..234 'b': u64
236..237 'c': i32
244..256 '{ a + b; c }': i32
246..247 'a': u64
246..251 'a + b': u64
250..251 'b': u64
253..254 'c': i32
267..279'ref ref_to_x': &&i32
282..283 'x': &i32
293..302'mut mut_x': &i32
305..306 'x': &i32
316..336'ref mu...f_to_x': &mut &i32
339..340 'x': &i32
350..351 'k': &mut &i32
354..366'mut_ref_to_x': &mut &i32
"###
);
}
#[test]
fn infer_literal_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
fn any<T>() -> T { loop {} }
fn test(x: &i32) {
if let "foo" = any() {}
if let 1 = any() {}
if let 1u32 = any() {}
if let 1f32 = any() {}
if let 1.0 = any() {}
if let true = any() {}
}
"#, true),
@r###"
18..29 '{ loop {} }': T
20..27 'loop {}':!
25..27 '{}': ()
38..39 'x': &i32
47..209 '{ ...) {} }': ()
53..76 'if let...y() {}': ()
60..65 '"foo"': &str
60..65 '"foo"': &str
68..71 'any': fn any<&str>() -> &str
68..73 'any()': &str
74..76 '{}': ()
81..100 'if let...y() {}': ()
88..89 '1': i32
88..89 '1': i32
92..95 'any': fn any<i32>() -> i32
92..97 'any()': i32
98..100 '{}': ()
105..127 'if let...y() {}': ()
112..116 '1u32': u32
112..116 '1u32': u32
119..122 'any': fn any<u32>() -> u32
119..124 'any()': u32
125..127 '{}': ()
132..154 'if let...y() {}': ()
139..143 '1f32': f32
139..143 '1f32': f32
146..149 'any': fn any<f32>() -> f32
146..151 'any()': f32
152..154 '{}': ()
159..180 'if let...y() {}': ()
166..169 '1.0': f64
166..169 '1.0': f64
172..175 'any': fn any<f64>() -> f64
172..177 'any()': f64
178..180 '{}': ()
185..207 'if let...y() {}': ()
192..196 'true': bool
192..196 'true': bool
199..202 'any': fn any<bool>() -> bool
199..204 'any()': bool
205..207 '{}': ()
"###
);
}
#[test]
fn infer_range_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
fn test(x: &i32) {
if let 1..76 = 2u32 {}
if let 1..=76 = 2u32 {}
}
"#, true),
@r###"
9..10 'x': &i32
18..76 '{ ...2 {} }': ()
24..46 'if let...u32 {}': ()
31..36 '1..76': u32
39..43 '2u32': u32
44..46 '{}': ()
51..74 'if let...u32 {}': ()
58..64 '1..=76': u32
67..71 '2u32': u32
72..74 '{}': ()
"###
);
}
#[test]
fn infer_pattern_match_ergonomics() {
assert_snapshot!(
infer(r#"
struct A<T>(T);
fn test() {
let A(n) = &A(1);
let A(n) = &mut A(1);
}
"#),
@r###"
28..79 '{ ...(1); }': ()
38..42 'A(n)': A<i32>
40..41 'n': &i32
45..50 '&A(1)': &A<i32>
46..47 'A': A<i32>(i32) -> A<i32>
46..50 'A(1)': A<i32>
48..49 '1': i32
60..64 'A(n)': A<i32>
62..63 'n': &mut i32
67..76 '&mut A(1)': &mut A<i32>
72..73 'A': A<i32>(i32) -> A<i32>
72..76 'A(1)': A<i32>
74..75 '1': i32
"###
);
}
#[test]
fn | () {
mark::check!(match_ergonomics_ref);
assert_snapshot!(
infer(r#"
fn test() {
let v = &(1, &2);
let (_, &w) = v;
}
"#),
@r###"
11..57 '{ ...= v; }': ()
21..22 'v': &(i32, &i32)
25..33 '&(1, &2)': &(i32, &i32)
26..33 '(1, &2)': (i32, &i32)
27..28 '1': i32
30..32 '&2': &i32
31..32 '2': i32
43..50 '(_, &w)': (i32, &i32)
44..45 '_': i32
47..49 '&w': &i32
48..49 'w': i32
53..54 'v': &(i32, &i32)
"###
);
}
#[test]
fn infer_pattern_match_slice() {
assert_snapshot!(
infer(r#"
fn test() {
let slice: &[f64] = &[0.0];
match slice {
&[] => {},
&[a] => {
a;
},
&[b, c] => {
b;
c;
}
_ => {}
}
}
"#),
@r###"
11..210 '{ ... } }': ()
21..26'slice': &[f64]
37..43 '&[0.0]': &[f64; _]
38..43 '[0.0]': [f64; _]
39..42 '0.0': f64
49..208'match... }': ()
55..60'slice': &[f64]
71..74 '&[]': &[f64]
72..74 '[]': [f64]
78..80 '{}': ()
90..94 '&[a]': &[f64]
91..94 '[a]': [f64]
92..93 'a': f64
98..124 '{ ... }': ()
112..113 'a': f64
134..141 '&[b, c]': &[f64]
135..141 '[b, c]': [f64]
136..137 'b': f64
139..140 'c': f64
145..186 '{ ... }': ()
159..160 'b': f64
174..175 'c': f64
195..196 '_': &[f64]
200..202 '{}': ()
"###
);
}
#[test]
fn infer_pattern_match_arr() {
assert_snapshot!(
infer(r#"
fn test() {
let arr: [f64; 2] = [0.0, 1.0];
match arr {
[1.0, a] => {
a;
},
[b, c] => {
b;
c;
}
}
}
"#),
@r###"
11..180 '{ ... } }': ()
21..24 'arr': [f64; _]
37..47 '[0.0, 1.0]': [f64; _]
38..41 '0.0': f64
43..46 '1.0': f64
53..178'match... }': ()
59..62 'arr': [f64; _]
73..81 '[1.0, a]': [f64; _]
74..77 '1.0': f64
74..77 '1.0': f64
79..80 'a': f64
85..111 '{ ... }': ()
99..100 'a': f64
121..127 '[b, c]': [f64; _]
122..123 'b': f64
125..126 'c': f64
131..172 '{ ... }': ()
145..146 'b': f64
160..161 'c': f64
"###
);
}
#[test]
fn infer_adt_pattern() {
assert_snapshot!(
infer(r#"
enum E {
A { x: usize },
B
}
struct S(u32, E);
fn test() {
let e = E::A { x: 3 };
let S(y, z) = foo;
let E::A { x: new_var } = e;
match e {
E::A { x } => x,
E::B if foo => 1,
E::B => 10,
};
let ref d @ E::A {.. } = e;
d;
}
"#),
@r###"
68..289 '{ ... d; }': ()
78..79 'e': E
82..95 'E::A { x: 3 }': E
92..93 '3': usize
106..113 'S(y, z)': S
108..109 'y': u32
111..112 'z': E
116..119 'foo': S
129..148 'E::A {..._var }': E
139..146 'new_var': usize
151..152 'e': E
159..245'match... }': usize
165..166 'e': E
177..187 'E::A { x }': E
184..185 'x': usize
191..192 'x': usize
202..206 'E::B': E
210..213 'foo': bool
217..218 '1': usize
228..232 'E::B': E
236..238 '10': usize
256..275'ref d...{.. }': &E
264..275 'E::A {.. }': E
278..279 'e': E
285..286 'd': &E
"###
);
}
#[test]
fn enum_variant_through_self_in_pattern() {
assert_snapshot!(
infer(r#"
enum E {
A { x: usize },
B(usize),
C
}
impl E {
fn test() {
match (loop {}) {
Self::A { x } => { x; },
Self::B(x) => { x; },
Self::C => {},
};
}
}
"#),
@r###"
76..218 '{ ... }': ()
86..211'match... }': ()
93..100 'loop {}':!
98..100 '{}': ()
116..129 'Self::A { x }': E
126..127 'x': usize
133..139 '{ x; }': ()
135..136 'x': usize
153..163 'Self::B(x)': E
161..162 'x': usize
167..173 '{ x; }': ()
169..170 'x': usize
187..194 'Self::C': E
198..200 '{}': ()
"###
);
}
#[test]
fn infer_generics_in_patterns() {
assert_snapshot!(
infer(r#"
struct A<T> {
x: T,
}
enum Option<T> {
Some(T),
None,
}
fn test(a1: A<u32>, o: Option<u64>) {
let A { x: x2 } = a1;
let A::<i64> { x: x3 } = A { x: 1 };
match o {
Option::Some(t) => t,
_ => 1,
};
}
"#),
@r###"
79..81 'a1': A<u32>
91..92 'o': Option<u64>
107..244 '{ ... }; }': ()
117..128 'A { x: x2 }': A<u32>
124..126 'x2': u32
131..133 'a1': A<u32>
143..161 'A::<i6...: x3 }': A<i64>
157..159 'x3': i64
164..174 'A { x: 1 }': A<i64>
171..172 '1': i64
180..241'match... }': u64
186..187 'o': Option<u64>
198..213 'Option::Some(t)': Option<u64>
211..212 't': u64
217..218 't': u64
228..229 '_': Option<u64>
233..234 '1': u64
"###
);
}
#[test]
fn infer_const_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
enum Option<T> { None }
use Option::None;
struct Foo;
const Bar: usize = 1;
fn test() {
let a: Option<u32> = None;
let b: Option<i64> = match a {
None => None,
};
let _: () = match () { Foo => Foo }; // Expected mismatch
let _: () = match () { Bar => Bar }; // Expected mismatch
}
"#, true),
@r###"
74..75 '1': usize
88..310 '{ ...atch }': ()
98..99 'a': Option<u32>
115..119 'None': Option<u32>
129..130 'b': Option<i64>
146..183'match... }': Option<i64>
152..153 'a': Option<u32>
164..168 'None': Option<u32>
172..176 'None': Option<i64>
193..194 '_': ()
201..224'match... Foo }': Foo
207..209 '()': ()
212..215 'Foo': Foo
219..222 'Foo': Foo
255..256 '_': ()
263..286'match... Bar }': usize
269..271 '()': ()
274..277 'Bar': usize
281..284 'Bar': usize
201..224: expected (), got Foo
263..286: expected (), got usize
"###
);
}
#[test]
fn infer_guard() {
assert_snapshot!(
infer(r#"
struct S;
impl S { fn foo(&self) -> bool { false } }
fn main() {
match S {
s if s.foo() => (),
}
}
"#), @"
28..32'self': &S
42..51 '{ false }': bool
44..49 'false': bool
65..116 '{ ... } }': ()
71..114'match... }': ()
77..78 'S': S
89..90's': S
94..95's': S
94..101's.foo()': bool
105..107 '()': ()
")
}
#[test]
fn match_ergonomics_in_closure_params() {
assert_snapshot!(
infer(r#"
#[lang = "fn_once"]
trait FnOnce<Args> {
type Output;
}
fn foo<T, U, F: FnOnce(T) -> U>(t: T, f: F) -> U { loop {} }
fn test() {
foo(&(1, "a"), |&(x, y)| x); // normal, no match ergonomics
foo(&(1, "a"), |(x, y)| x);
}
"#),
@r###"
94..95 't': T
100..101 'f': F
111..122 '{ loop {} }': U
113..120 'loop {}':!
118..120 '{}': ()
134..233 '{ ... x); }': ()
140..143 'foo': fn foo<&(i32, &str), i32, |&(i32, &str)| -> i32>(&(i32, &str), |&(i32, &str)| -> i32) -> i32
140..167 'foo(&(...y)| x)': i32
144..153 '&(1, "a")': &(i32, &str)
145..153 '(1, "a")': (i32, &str)
146..147 '1': i32
149..152 '"a"': &str
155..166 '|&(x, y)| x': |&(i32, &str)| -> i32
156..163 '&(x, y)': &(i32, &str)
157..163 '(x, y)': (i32, &str)
158..159 'x': i32
161..162 'y': &str
165..166 'x': i32
204..207 'foo': fn foo<&(i32, &str), &i32, |&(i32, &str)| -> &i32>(&(i32, &str), |&(i32, &str)| -> &i32) -> &i32
204..230 'foo(&(...y)| x)': &i32
208..217 '&(1, "a")': &(i32, &str)
209..217 '(1, "a")': (i32, &str)
210..211 '1': i32
213..216 '"a"': &str
219..229 '|(x, y)| x': |&(i32, &str)| -> &i32
220..226 '(x, y)': (i32, &str)
221..222 'x': &i32
224..225 'y': &&str
228..229 'x': &i32
"###
);
}
| infer_pattern_match_ergonomics_ref | identifier_name |
patterns.rs | use insta::assert_snapshot;
use test_utils::mark;
use super::{infer, infer_with_mismatches};
#[test]
fn infer_pattern() {
assert_snapshot!( | let (c, d) = (1, "hello");
for (e, f) in some_iter {
let g = e;
}
if let [val] = opt {
let h = val;
}
let lambda = |a: u64, b, c: i32| { a + b; c };
let ref ref_to_x = x;
let mut mut_x = x;
let ref mut mut_ref_to_x = x;
let k = mut_ref_to_x;
}
"#),
@r###"
9..10 'x': &i32
18..369 '{ ...o_x; }': ()
28..29 'y': &i32
32..33 'x': &i32
43..45 '&z': &i32
44..45 'z': i32
48..49 'x': &i32
59..60 'a': i32
63..64 'z': i32
74..80 '(c, d)': (i32, &str)
75..76 'c': i32
78..79 'd': &str
83..95 '(1, "hello")': (i32, &str)
84..85 '1': i32
87..94 '"hello"': &str
102..152 'for (e... }': ()
106..112 '(e, f)': ({unknown}, {unknown})
107..108 'e': {unknown}
110..111 'f': {unknown}
116..125'some_iter': {unknown}
126..152 '{ ... }': ()
140..141 'g': {unknown}
144..145 'e': {unknown}
158..205 'if let... }': ()
165..170 '[val]': [{unknown}]
166..169 'val': {unknown}
173..176 'opt': [{unknown}]
177..205 '{ ... }': ()
191..192 'h': {unknown}
195..198 'val': {unknown}
215..221 'lambda': |u64, u64, i32| -> i32
224..256 '|a: u6...b; c }': |u64, u64, i32| -> i32
225..226 'a': u64
233..234 'b': u64
236..237 'c': i32
244..256 '{ a + b; c }': i32
246..247 'a': u64
246..251 'a + b': u64
250..251 'b': u64
253..254 'c': i32
267..279'ref ref_to_x': &&i32
282..283 'x': &i32
293..302'mut mut_x': &i32
305..306 'x': &i32
316..336'ref mu...f_to_x': &mut &i32
339..340 'x': &i32
350..351 'k': &mut &i32
354..366'mut_ref_to_x': &mut &i32
"###
);
}
#[test]
fn infer_literal_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
fn any<T>() -> T { loop {} }
fn test(x: &i32) {
if let "foo" = any() {}
if let 1 = any() {}
if let 1u32 = any() {}
if let 1f32 = any() {}
if let 1.0 = any() {}
if let true = any() {}
}
"#, true),
@r###"
18..29 '{ loop {} }': T
20..27 'loop {}':!
25..27 '{}': ()
38..39 'x': &i32
47..209 '{ ...) {} }': ()
53..76 'if let...y() {}': ()
60..65 '"foo"': &str
60..65 '"foo"': &str
68..71 'any': fn any<&str>() -> &str
68..73 'any()': &str
74..76 '{}': ()
81..100 'if let...y() {}': ()
88..89 '1': i32
88..89 '1': i32
92..95 'any': fn any<i32>() -> i32
92..97 'any()': i32
98..100 '{}': ()
105..127 'if let...y() {}': ()
112..116 '1u32': u32
112..116 '1u32': u32
119..122 'any': fn any<u32>() -> u32
119..124 'any()': u32
125..127 '{}': ()
132..154 'if let...y() {}': ()
139..143 '1f32': f32
139..143 '1f32': f32
146..149 'any': fn any<f32>() -> f32
146..151 'any()': f32
152..154 '{}': ()
159..180 'if let...y() {}': ()
166..169 '1.0': f64
166..169 '1.0': f64
172..175 'any': fn any<f64>() -> f64
172..177 'any()': f64
178..180 '{}': ()
185..207 'if let...y() {}': ()
192..196 'true': bool
192..196 'true': bool
199..202 'any': fn any<bool>() -> bool
199..204 'any()': bool
205..207 '{}': ()
"###
);
}
#[test]
fn infer_range_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
fn test(x: &i32) {
if let 1..76 = 2u32 {}
if let 1..=76 = 2u32 {}
}
"#, true),
@r###"
9..10 'x': &i32
18..76 '{ ...2 {} }': ()
24..46 'if let...u32 {}': ()
31..36 '1..76': u32
39..43 '2u32': u32
44..46 '{}': ()
51..74 'if let...u32 {}': ()
58..64 '1..=76': u32
67..71 '2u32': u32
72..74 '{}': ()
"###
);
}
#[test]
fn infer_pattern_match_ergonomics() {
assert_snapshot!(
infer(r#"
struct A<T>(T);
fn test() {
let A(n) = &A(1);
let A(n) = &mut A(1);
}
"#),
@r###"
28..79 '{ ...(1); }': ()
38..42 'A(n)': A<i32>
40..41 'n': &i32
45..50 '&A(1)': &A<i32>
46..47 'A': A<i32>(i32) -> A<i32>
46..50 'A(1)': A<i32>
48..49 '1': i32
60..64 'A(n)': A<i32>
62..63 'n': &mut i32
67..76 '&mut A(1)': &mut A<i32>
72..73 'A': A<i32>(i32) -> A<i32>
72..76 'A(1)': A<i32>
74..75 '1': i32
"###
);
}
#[test]
fn infer_pattern_match_ergonomics_ref() {
mark::check!(match_ergonomics_ref);
assert_snapshot!(
infer(r#"
fn test() {
let v = &(1, &2);
let (_, &w) = v;
}
"#),
@r###"
11..57 '{ ...= v; }': ()
21..22 'v': &(i32, &i32)
25..33 '&(1, &2)': &(i32, &i32)
26..33 '(1, &2)': (i32, &i32)
27..28 '1': i32
30..32 '&2': &i32
31..32 '2': i32
43..50 '(_, &w)': (i32, &i32)
44..45 '_': i32
47..49 '&w': &i32
48..49 'w': i32
53..54 'v': &(i32, &i32)
"###
);
}
#[test]
fn infer_pattern_match_slice() {
assert_snapshot!(
infer(r#"
fn test() {
let slice: &[f64] = &[0.0];
match slice {
&[] => {},
&[a] => {
a;
},
&[b, c] => {
b;
c;
}
_ => {}
}
}
"#),
@r###"
11..210 '{ ... } }': ()
21..26'slice': &[f64]
37..43 '&[0.0]': &[f64; _]
38..43 '[0.0]': [f64; _]
39..42 '0.0': f64
49..208'match... }': ()
55..60'slice': &[f64]
71..74 '&[]': &[f64]
72..74 '[]': [f64]
78..80 '{}': ()
90..94 '&[a]': &[f64]
91..94 '[a]': [f64]
92..93 'a': f64
98..124 '{ ... }': ()
112..113 'a': f64
134..141 '&[b, c]': &[f64]
135..141 '[b, c]': [f64]
136..137 'b': f64
139..140 'c': f64
145..186 '{ ... }': ()
159..160 'b': f64
174..175 'c': f64
195..196 '_': &[f64]
200..202 '{}': ()
"###
);
}
#[test]
fn infer_pattern_match_arr() {
assert_snapshot!(
infer(r#"
fn test() {
let arr: [f64; 2] = [0.0, 1.0];
match arr {
[1.0, a] => {
a;
},
[b, c] => {
b;
c;
}
}
}
"#),
@r###"
11..180 '{ ... } }': ()
21..24 'arr': [f64; _]
37..47 '[0.0, 1.0]': [f64; _]
38..41 '0.0': f64
43..46 '1.0': f64
53..178'match... }': ()
59..62 'arr': [f64; _]
73..81 '[1.0, a]': [f64; _]
74..77 '1.0': f64
74..77 '1.0': f64
79..80 'a': f64
85..111 '{ ... }': ()
99..100 'a': f64
121..127 '[b, c]': [f64; _]
122..123 'b': f64
125..126 'c': f64
131..172 '{ ... }': ()
145..146 'b': f64
160..161 'c': f64
"###
);
}
#[test]
fn infer_adt_pattern() {
assert_snapshot!(
infer(r#"
enum E {
A { x: usize },
B
}
struct S(u32, E);
fn test() {
let e = E::A { x: 3 };
let S(y, z) = foo;
let E::A { x: new_var } = e;
match e {
E::A { x } => x,
E::B if foo => 1,
E::B => 10,
};
let ref d @ E::A {.. } = e;
d;
}
"#),
@r###"
68..289 '{ ... d; }': ()
78..79 'e': E
82..95 'E::A { x: 3 }': E
92..93 '3': usize
106..113 'S(y, z)': S
108..109 'y': u32
111..112 'z': E
116..119 'foo': S
129..148 'E::A {..._var }': E
139..146 'new_var': usize
151..152 'e': E
159..245'match... }': usize
165..166 'e': E
177..187 'E::A { x }': E
184..185 'x': usize
191..192 'x': usize
202..206 'E::B': E
210..213 'foo': bool
217..218 '1': usize
228..232 'E::B': E
236..238 '10': usize
256..275'ref d...{.. }': &E
264..275 'E::A {.. }': E
278..279 'e': E
285..286 'd': &E
"###
);
}
#[test]
fn enum_variant_through_self_in_pattern() {
assert_snapshot!(
infer(r#"
enum E {
A { x: usize },
B(usize),
C
}
impl E {
fn test() {
match (loop {}) {
Self::A { x } => { x; },
Self::B(x) => { x; },
Self::C => {},
};
}
}
"#),
@r###"
76..218 '{ ... }': ()
86..211'match... }': ()
93..100 'loop {}':!
98..100 '{}': ()
116..129 'Self::A { x }': E
126..127 'x': usize
133..139 '{ x; }': ()
135..136 'x': usize
153..163 'Self::B(x)': E
161..162 'x': usize
167..173 '{ x; }': ()
169..170 'x': usize
187..194 'Self::C': E
198..200 '{}': ()
"###
);
}
#[test]
fn infer_generics_in_patterns() {
assert_snapshot!(
infer(r#"
struct A<T> {
x: T,
}
enum Option<T> {
Some(T),
None,
}
fn test(a1: A<u32>, o: Option<u64>) {
let A { x: x2 } = a1;
let A::<i64> { x: x3 } = A { x: 1 };
match o {
Option::Some(t) => t,
_ => 1,
};
}
"#),
@r###"
79..81 'a1': A<u32>
91..92 'o': Option<u64>
107..244 '{ ... }; }': ()
117..128 'A { x: x2 }': A<u32>
124..126 'x2': u32
131..133 'a1': A<u32>
143..161 'A::<i6...: x3 }': A<i64>
157..159 'x3': i64
164..174 'A { x: 1 }': A<i64>
171..172 '1': i64
180..241'match... }': u64
186..187 'o': Option<u64>
198..213 'Option::Some(t)': Option<u64>
211..212 't': u64
217..218 't': u64
228..229 '_': Option<u64>
233..234 '1': u64
"###
);
}
#[test]
fn infer_const_pattern() {
assert_snapshot!(
infer_with_mismatches(r#"
enum Option<T> { None }
use Option::None;
struct Foo;
const Bar: usize = 1;
fn test() {
let a: Option<u32> = None;
let b: Option<i64> = match a {
None => None,
};
let _: () = match () { Foo => Foo }; // Expected mismatch
let _: () = match () { Bar => Bar }; // Expected mismatch
}
"#, true),
@r###"
74..75 '1': usize
88..310 '{ ...atch }': ()
98..99 'a': Option<u32>
115..119 'None': Option<u32>
129..130 'b': Option<i64>
146..183'match... }': Option<i64>
152..153 'a': Option<u32>
164..168 'None': Option<u32>
172..176 'None': Option<i64>
193..194 '_': ()
201..224'match... Foo }': Foo
207..209 '()': ()
212..215 'Foo': Foo
219..222 'Foo': Foo
255..256 '_': ()
263..286'match... Bar }': usize
269..271 '()': ()
274..277 'Bar': usize
281..284 'Bar': usize
201..224: expected (), got Foo
263..286: expected (), got usize
"###
);
}
#[test]
fn infer_guard() {
assert_snapshot!(
infer(r#"
struct S;
impl S { fn foo(&self) -> bool { false } }
fn main() {
match S {
s if s.foo() => (),
}
}
"#), @"
28..32'self': &S
42..51 '{ false }': bool
44..49 'false': bool
65..116 '{ ... } }': ()
71..114'match... }': ()
77..78 'S': S
89..90's': S
94..95's': S
94..101's.foo()': bool
105..107 '()': ()
")
}
#[test]
fn match_ergonomics_in_closure_params() {
assert_snapshot!(
infer(r#"
#[lang = "fn_once"]
trait FnOnce<Args> {
type Output;
}
fn foo<T, U, F: FnOnce(T) -> U>(t: T, f: F) -> U { loop {} }
fn test() {
foo(&(1, "a"), |&(x, y)| x); // normal, no match ergonomics
foo(&(1, "a"), |(x, y)| x);
}
"#),
@r###"
94..95 't': T
100..101 'f': F
111..122 '{ loop {} }': U
113..120 'loop {}':!
118..120 '{}': ()
134..233 '{ ... x); }': ()
140..143 'foo': fn foo<&(i32, &str), i32, |&(i32, &str)| -> i32>(&(i32, &str), |&(i32, &str)| -> i32) -> i32
140..167 'foo(&(...y)| x)': i32
144..153 '&(1, "a")': &(i32, &str)
145..153 '(1, "a")': (i32, &str)
146..147 '1': i32
149..152 '"a"': &str
155..166 '|&(x, y)| x': |&(i32, &str)| -> i32
156..163 '&(x, y)': &(i32, &str)
157..163 '(x, y)': (i32, &str)
158..159 'x': i32
161..162 'y': &str
165..166 'x': i32
204..207 'foo': fn foo<&(i32, &str), &i32, |&(i32, &str)| -> &i32>(&(i32, &str), |&(i32, &str)| -> &i32) -> &i32
204..230 'foo(&(...y)| x)': &i32
208..217 '&(1, "a")': &(i32, &str)
209..217 '(1, "a")': (i32, &str)
210..211 '1': i32
213..216 '"a"': &str
219..229 '|(x, y)| x': |&(i32, &str)| -> &i32
220..226 '(x, y)': (i32, &str)
221..222 'x': &i32
224..225 'y': &&str
228..229 'x': &i32
"###
);
} | infer(r#"
fn test(x: &i32) {
let y = x;
let &z = x;
let a = z; | random_line_split |
list_view.rs | use std::sync::Arc;
use crate::aliases::WinResult;
use crate::co;
use crate::funcs::{GetAsyncKeyState, GetCursorPos, PostQuitMessage};
use crate::gui::base::Base;
use crate::gui::events::ListViewEvents;
use crate::gui::native_controls::list_view_columns::ListViewColumns;
use crate::gui::native_controls::list_view_items::ListViewItems;
use crate::gui::native_controls::base_native_control::{BaseNativeControl, OptsId};
use crate::gui::privs::{auto_ctrl_id, multiply_dpi};
use crate::gui::traits::{baseref_from_parent, Parent};
use crate::handles::{HIMAGELIST, HMENU, HWND};
use crate::msg::lvm;
use crate::structs::{LVHITTESTINFO, NMITEMACTIVATE, NMLVKEYDOWN, POINT, SIZE};
/// Native
/// [list view](https://docs.microsoft.com/en-us/windows/win32/controls/list-view-controls-overview)
/// control. Not to be confused with the simpler [list box](crate::gui::ListBox)
/// control.
///
/// Implements [`Child`](crate::gui::Child) trait.
#[derive(Clone)]
pub struct ListView(Arc<Obj>);
struct Obj { // actual fields of ListView
base: BaseNativeControl,
opts_id: OptsId<ListViewOpts>,
events: ListViewEvents,
columns: ListViewColumns,
items: ListViewItems,
context_menu: Option<HMENU>,
}
impl_send_sync_child!(ListView);
impl ListView {
/// Instantiates a new `ListView` object, to be created on the parent window
/// with [`HWND::CreateWindowEx`](crate::HWND::CreateWindowEx).
pub fn new(parent: &dyn Parent, opts: ListViewOpts) -> ListView {
let parent_base_ref = baseref_from_parent(parent);
let opts = ListViewOpts::define_ctrl_id(opts);
let ctrl_id = opts.ctrl_id;
let context_menu = opts.context_menu;
let new_self = Self(
Arc::new(
Obj {
base: BaseNativeControl::new(parent_base_ref),
opts_id: OptsId::Wnd(opts),
events: ListViewEvents::new(parent_base_ref, ctrl_id),
columns: ListViewColumns::new(),
items: ListViewItems::new(),
context_menu,
},
),
);
new_self.0.columns.set_hwnd_ref(new_self.0.base.hwnd_ref());
new_self.0.items.set_hwnd_ref(new_self.0.base.hwnd_ref());
parent_base_ref.privileged_events_ref().wm(parent_base_ref.creation_wm(), {
let me = new_self.clone();
move |_| { me.create(); 0 }
});
new_self.handled_events(parent_base_ref, ctrl_id);
new_self
}
/// Instantiates a new `ListView` object, to be loaded from a dialog
/// resource with [`HWND::GetDlgItem`](crate::HWND::GetDlgItem).
///
/// **Note:** The optional `context_menu` is shared: it must be destroyed
/// manually after the control is destroyed. But note that menus loaded from
/// resources don't need to be destroyed.
pub fn new_dlg(
parent: &dyn Parent,
ctrl_id: u16,
context_menu: Option<HMENU>) -> ListView
{
let parent_base_ref = baseref_from_parent(parent);
let new_self = Self(
Arc::new(
Obj {
base: BaseNativeControl::new(parent_base_ref),
opts_id: OptsId::Dlg(ctrl_id),
events: ListViewEvents::new(parent_base_ref, ctrl_id),
columns: ListViewColumns::new(),
items: ListViewItems::new(),
context_menu,
},
),
);
new_self.0.columns.set_hwnd_ref(new_self.0.base.hwnd_ref());
new_self.0.items.set_hwnd_ref(new_self.0.base.hwnd_ref());
parent_base_ref.privileged_events_ref().wm_init_dialog({
let me = new_self.clone();
move |_| { me.create(); true }
});
new_self.handled_events(parent_base_ref, ctrl_id);
new_self
}
fn create(&self) {
| match &self.0.opts_id {
OptsId::Wnd(opts) => {
let mut pos = opts.position;
let mut sz = opts.size;
multiply_dpi(Some(&mut pos), Some(&mut sz))?;
self.0.base.create_window( // may panic
"SysListView32", None, pos, sz,
opts.ctrl_id,
opts.window_ex_style,
opts.window_style | opts.list_view_style.into(),
)?;
if opts.list_view_ex_style!= co::LVS_EX::NoValue {
self.toggle_extended_style(true, opts.list_view_ex_style);
}
self.columns().add(&opts.columns)?;
Ok(())
},
OptsId::Dlg(ctrl_id) => self.0.base.create_dlg(*ctrl_id).map(|_| ()), // may panic
}
}().unwrap_or_else(|err| PostQuitMessage(err))
}
fn handled_events(&self, parent_base_ref: &Base, ctrl_id: u16) {
parent_base_ref.privileged_events_ref().add_nfy(ctrl_id, co::LVN::KEYDOWN.into(), {
let me = self.clone();
move |p| {
let lvnk = unsafe { p.cast_nmhdr::<NMLVKEYDOWN>() };
let has_ctrl = GetAsyncKeyState(co::VK::CONTROL);
let has_shift = GetAsyncKeyState(co::VK::SHIFT);
if has_ctrl && lvnk.wVKey == co::VK('A' as _) { // Ctrl+A
me.items().set_selected_all(true)
.unwrap_or_else(|err| PostQuitMessage(err));
} else if lvnk.wVKey == co::VK::APPS { // context menu key
me.show_context_menu(false, has_ctrl, has_shift).unwrap();
}
None
}
});
parent_base_ref.privileged_events_ref().add_nfy(ctrl_id, co::NM::RCLICK.into(), {
let me = self.clone();
move |p| {
let nmia = unsafe { p.cast_nmhdr::<NMITEMACTIVATE>() };
let has_ctrl = nmia.uKeyFlags.has(co::LVKF::CONTROL);
let has_shift = nmia.uKeyFlags.has(co::LVKF::SHIFT);
me.show_context_menu(true, has_ctrl, has_shift).unwrap();
None
}
});
}
pub_fn_ctrlid_hwnd_on_onsubclass!(ListViewEvents);
/// Exposes the column methods.
pub fn columns(&self) -> &ListViewColumns {
&self.0.columns
}
/// Returns the context menu attached to this list view, if any.
///
/// The context menu is attached when the list view is created, either by
/// calling [`ListView::new`](crate::gui::ListView::new) or
/// [`ListView::new_dlg`](crate::gui::ListView::new_dlg).
pub fn context_menu(&self) -> Option<HMENU> {
self.0.context_menu
}
/// Retrieves one of the associated image lists by sending an
/// [`LVM_GETIMAGELIST`](crate::msg::lvm::GetImageList) message.
pub fn image_list(&self, kind: co::LVSIL) -> Option<HIMAGELIST> {
self.hwnd().SendMessage(lvm::GetImageList { kind })
}
/// Exposes the item methods.
pub fn items(&self) -> &ListViewItems {
&self.0.items
}
/// Retrieves the current view by sending an
/// [`LVM_GETVIEW`](crate::msg::lvm::GetView) message.
pub fn current_view(&self) -> co::LV_VIEW {
self.hwnd().SendMessage(lvm::GetView {})
}
/// Sets the current view by sending an
/// [`LVM_SETVIEW`](crate::msg::lvm::SetView) message.
pub fn set_current_view(&self, view: co::LV_VIEW) -> WinResult<()> {
self.hwnd().SendMessage(lvm::SetView { view })
}
/// Sets the one of the associated image lists by sending an
/// [`LVM_SETIMAGELIST`](crate::msg::lvm::SetImageList) message.
///
/// Returns the previous image list, if any.
pub fn set_image_list(&self,
kind: co::LVSIL, himagelist: HIMAGELIST) -> Option<HIMAGELIST>
{
self.hwnd().SendMessage(lvm::SetImageList { kind, himagelist })
}
/// Toggles the given extended list view styles by sending an
/// [`LVM_SETEXTENDEDLISTVIEWSTYLE`](crate::msg::lvm::SetExtendedListViewStyle)
/// message.
pub fn toggle_extended_style(&self, set: bool, ex_style: co::LVS_EX) {
self.hwnd().SendMessage(lvm::SetExtendedListViewStyle {
mask: ex_style,
style: if set { ex_style } else { co::LVS_EX::NoValue },
});
}
fn show_context_menu(&self,
follow_cursor: bool, has_ctrl: bool, has_shift: bool) -> WinResult<()>
{
let hmenu = match self.0.context_menu {
Some(h) => h,
None => return Ok(()), // no menu, nothing to do
};
let menu_pos = if follow_cursor { // usually when fired by a right-click
let mut menu_pos = GetCursorPos()?; // relative to screen
self.hwnd().ScreenToClient(&mut menu_pos)?; // now relative to list view
let mut lvhti = LVHITTESTINFO::default(); // find item below cursor, if any
lvhti.pt = menu_pos;
match self.items().hit_test(&mut lvhti) {
Some(idx) => { // an item was right-clicked
if!has_ctrl &&!has_shift {
if!self.items().is_selected(idx) {
self.items().set_selected_all(false)?;
self.items().set_selected(true, &[idx])?;
}
self.items().set_focused(idx)?;
}
},
None => { // no item was right-clicked
self.items().set_selected_all(false)?;
},
}
self.hwnd().SetFocus(); // because a right-click won't set the focus by itself
menu_pos
} else { // usually fired by the context meny key
let focused_idx_opt = self.items().focused();
if focused_idx_opt.is_some() && self.items().is_visible(focused_idx_opt.unwrap()) {
let focused_idx = focused_idx_opt.unwrap();
let rc_item = self.items().rect(focused_idx, co::LVIR::BOUNDS)?;
POINT::new(rc_item.left + 16,
rc_item.top + (rc_item.bottom - rc_item.top) / 2)
} else { // no item is focused and visible
POINT::new(6, 10) // arbitrary
}
};
hmenu.TrackPopupMenuAtPoint(
menu_pos, self.hwnd().GetParent()?, self.hwnd())
}
}
//------------------------------------------------------------------------------
/// Options to create a [`ListView`](crate::gui::ListView) programmatically with
/// [`ListView::new`](crate::gui::ListView::new).
pub struct ListViewOpts {
/// Control position within parent client area, in pixels, to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Will be adjusted to match current system DPI.
///
/// Defaults to 0 x 0.
pub position: POINT,
/// Control size, in pixels, to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Will be adjusted to match current system DPI.
///
/// Defaults to 50 x 50.
pub size: SIZE,
/// List view styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `LVS::REPORT | LVS::NOSORTHEADER | LVS::SHOWSELALWAYS | LVS::SHAREIMAGELISTS`.
pub list_view_style: co::LVS,
/// Extended list view styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `LVS_EX::NoValue`.
pub list_view_ex_style: co::LVS_EX,
/// Window styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `WS::CHILD | WS::VISIBLE | WS::TABSTOP | WS::GROUP`.
pub window_style: co::WS,
/// Extended window styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `WS_EX::LEFT | WS_EX::CLIENTEDGE`.
pub window_ex_style: co::WS_EX,
/// The control ID.
///
/// Defaults to an auto-generated ID.
pub ctrl_id: u16,
/// Context popup menu.
///
/// This menu is shared: it must be destroyed manually after the control is
/// destroyed. But note that menus loaded from resources don't need to be
/// destroyed.
///
/// Defaults to `None`.
pub context_menu: Option<HMENU>,
/// Text and width of columns to be added right away. The columns only show
/// in report mode.
///
/// Defaults to none.
pub columns: Vec<(String, u32)>,
}
impl Default for ListViewOpts {
fn default() -> Self {
Self {
position: POINT::new(0, 0),
size: SIZE::new(50, 50),
list_view_style: co::LVS::REPORT | co::LVS::NOSORTHEADER | co::LVS::SHOWSELALWAYS | co::LVS::SHAREIMAGELISTS,
list_view_ex_style: co::LVS_EX::NoValue,
window_style: co::WS::CHILD | co::WS::VISIBLE | co::WS::TABSTOP | co::WS::GROUP,
window_ex_style: co::WS_EX::LEFT | co::WS_EX::CLIENTEDGE,
ctrl_id: 0,
context_menu: None,
columns: Vec::default(),
}
}
}
impl ListViewOpts {
fn define_ctrl_id(mut self) -> Self {
if self.ctrl_id == 0 {
self.ctrl_id = auto_ctrl_id();
}
self
}
} | || -> WinResult<()> {
| random_line_split |
list_view.rs | use std::sync::Arc;
use crate::aliases::WinResult;
use crate::co;
use crate::funcs::{GetAsyncKeyState, GetCursorPos, PostQuitMessage};
use crate::gui::base::Base;
use crate::gui::events::ListViewEvents;
use crate::gui::native_controls::list_view_columns::ListViewColumns;
use crate::gui::native_controls::list_view_items::ListViewItems;
use crate::gui::native_controls::base_native_control::{BaseNativeControl, OptsId};
use crate::gui::privs::{auto_ctrl_id, multiply_dpi};
use crate::gui::traits::{baseref_from_parent, Parent};
use crate::handles::{HIMAGELIST, HMENU, HWND};
use crate::msg::lvm;
use crate::structs::{LVHITTESTINFO, NMITEMACTIVATE, NMLVKEYDOWN, POINT, SIZE};
/// Native
/// [list view](https://docs.microsoft.com/en-us/windows/win32/controls/list-view-controls-overview)
/// control. Not to be confused with the simpler [list box](crate::gui::ListBox)
/// control.
///
/// Implements [`Child`](crate::gui::Child) trait.
#[derive(Clone)]
pub struct ListView(Arc<Obj>);
struct Obj { // actual fields of ListView
base: BaseNativeControl,
opts_id: OptsId<ListViewOpts>,
events: ListViewEvents,
columns: ListViewColumns,
items: ListViewItems,
context_menu: Option<HMENU>,
}
impl_send_sync_child!(ListView);
impl ListView {
/// Instantiates a new `ListView` object, to be created on the parent window
/// with [`HWND::CreateWindowEx`](crate::HWND::CreateWindowEx).
pub fn new(parent: &dyn Parent, opts: ListViewOpts) -> ListView {
let parent_base_ref = baseref_from_parent(parent);
let opts = ListViewOpts::define_ctrl_id(opts);
let ctrl_id = opts.ctrl_id;
let context_menu = opts.context_menu;
let new_self = Self(
Arc::new(
Obj {
base: BaseNativeControl::new(parent_base_ref),
opts_id: OptsId::Wnd(opts),
events: ListViewEvents::new(parent_base_ref, ctrl_id),
columns: ListViewColumns::new(),
items: ListViewItems::new(),
context_menu,
},
),
);
new_self.0.columns.set_hwnd_ref(new_self.0.base.hwnd_ref());
new_self.0.items.set_hwnd_ref(new_self.0.base.hwnd_ref());
parent_base_ref.privileged_events_ref().wm(parent_base_ref.creation_wm(), {
let me = new_self.clone();
move |_| { me.create(); 0 }
});
new_self.handled_events(parent_base_ref, ctrl_id);
new_self
}
/// Instantiates a new `ListView` object, to be loaded from a dialog
/// resource with [`HWND::GetDlgItem`](crate::HWND::GetDlgItem).
///
/// **Note:** The optional `context_menu` is shared: it must be destroyed
/// manually after the control is destroyed. But note that menus loaded from
/// resources don't need to be destroyed.
pub fn new_dlg(
parent: &dyn Parent,
ctrl_id: u16,
context_menu: Option<HMENU>) -> ListView
{
let parent_base_ref = baseref_from_parent(parent);
let new_self = Self(
Arc::new(
Obj {
base: BaseNativeControl::new(parent_base_ref),
opts_id: OptsId::Dlg(ctrl_id),
events: ListViewEvents::new(parent_base_ref, ctrl_id),
columns: ListViewColumns::new(),
items: ListViewItems::new(),
context_menu,
},
),
);
new_self.0.columns.set_hwnd_ref(new_self.0.base.hwnd_ref());
new_self.0.items.set_hwnd_ref(new_self.0.base.hwnd_ref());
parent_base_ref.privileged_events_ref().wm_init_dialog({
let me = new_self.clone();
move |_| { me.create(); true }
});
new_self.handled_events(parent_base_ref, ctrl_id);
new_self
}
fn create(&self) {
|| -> WinResult<()> {
match &self.0.opts_id {
OptsId::Wnd(opts) => {
let mut pos = opts.position;
let mut sz = opts.size;
multiply_dpi(Some(&mut pos), Some(&mut sz))?;
self.0.base.create_window( // may panic
"SysListView32", None, pos, sz,
opts.ctrl_id,
opts.window_ex_style,
opts.window_style | opts.list_view_style.into(),
)?;
if opts.list_view_ex_style!= co::LVS_EX::NoValue {
self.toggle_extended_style(true, opts.list_view_ex_style);
}
self.columns().add(&opts.columns)?;
Ok(())
},
OptsId::Dlg(ctrl_id) => self.0.base.create_dlg(*ctrl_id).map(|_| ()), // may panic
}
}().unwrap_or_else(|err| PostQuitMessage(err))
}
fn handled_events(&self, parent_base_ref: &Base, ctrl_id: u16) {
parent_base_ref.privileged_events_ref().add_nfy(ctrl_id, co::LVN::KEYDOWN.into(), {
let me = self.clone();
move |p| {
let lvnk = unsafe { p.cast_nmhdr::<NMLVKEYDOWN>() };
let has_ctrl = GetAsyncKeyState(co::VK::CONTROL);
let has_shift = GetAsyncKeyState(co::VK::SHIFT);
if has_ctrl && lvnk.wVKey == co::VK('A' as _) { // Ctrl+A
me.items().set_selected_all(true)
.unwrap_or_else(|err| PostQuitMessage(err));
} else if lvnk.wVKey == co::VK::APPS { // context menu key
me.show_context_menu(false, has_ctrl, has_shift).unwrap();
}
None
}
});
parent_base_ref.privileged_events_ref().add_nfy(ctrl_id, co::NM::RCLICK.into(), {
let me = self.clone();
move |p| {
let nmia = unsafe { p.cast_nmhdr::<NMITEMACTIVATE>() };
let has_ctrl = nmia.uKeyFlags.has(co::LVKF::CONTROL);
let has_shift = nmia.uKeyFlags.has(co::LVKF::SHIFT);
me.show_context_menu(true, has_ctrl, has_shift).unwrap();
None
}
});
}
pub_fn_ctrlid_hwnd_on_onsubclass!(ListViewEvents);
/// Exposes the column methods.
pub fn columns(&self) -> &ListViewColumns {
&self.0.columns
}
/// Returns the context menu attached to this list view, if any.
///
/// The context menu is attached when the list view is created, either by
/// calling [`ListView::new`](crate::gui::ListView::new) or
/// [`ListView::new_dlg`](crate::gui::ListView::new_dlg).
pub fn context_menu(&self) -> Option<HMENU> {
self.0.context_menu
}
/// Retrieves one of the associated image lists by sending an
/// [`LVM_GETIMAGELIST`](crate::msg::lvm::GetImageList) message.
pub fn image_list(&self, kind: co::LVSIL) -> Option<HIMAGELIST> {
self.hwnd().SendMessage(lvm::GetImageList { kind })
}
/// Exposes the item methods.
pub fn items(&self) -> &ListViewItems {
&self.0.items
}
/// Retrieves the current view by sending an
/// [`LVM_GETVIEW`](crate::msg::lvm::GetView) message.
pub fn current_view(&self) -> co::LV_VIEW {
self.hwnd().SendMessage(lvm::GetView {})
}
/// Sets the current view by sending an
/// [`LVM_SETVIEW`](crate::msg::lvm::SetView) message.
pub fn set_current_view(&self, view: co::LV_VIEW) -> WinResult<()> {
self.hwnd().SendMessage(lvm::SetView { view })
}
/// Sets the one of the associated image lists by sending an
/// [`LVM_SETIMAGELIST`](crate::msg::lvm::SetImageList) message.
///
/// Returns the previous image list, if any.
pub fn set_image_list(&self,
kind: co::LVSIL, himagelist: HIMAGELIST) -> Option<HIMAGELIST>
{
self.hwnd().SendMessage(lvm::SetImageList { kind, himagelist })
}
/// Toggles the given extended list view styles by sending an
/// [`LVM_SETEXTENDEDLISTVIEWSTYLE`](crate::msg::lvm::SetExtendedListViewStyle)
/// message.
pub fn | (&self, set: bool, ex_style: co::LVS_EX) {
self.hwnd().SendMessage(lvm::SetExtendedListViewStyle {
mask: ex_style,
style: if set { ex_style } else { co::LVS_EX::NoValue },
});
}
fn show_context_menu(&self,
follow_cursor: bool, has_ctrl: bool, has_shift: bool) -> WinResult<()>
{
let hmenu = match self.0.context_menu {
Some(h) => h,
None => return Ok(()), // no menu, nothing to do
};
let menu_pos = if follow_cursor { // usually when fired by a right-click
let mut menu_pos = GetCursorPos()?; // relative to screen
self.hwnd().ScreenToClient(&mut menu_pos)?; // now relative to list view
let mut lvhti = LVHITTESTINFO::default(); // find item below cursor, if any
lvhti.pt = menu_pos;
match self.items().hit_test(&mut lvhti) {
Some(idx) => { // an item was right-clicked
if!has_ctrl &&!has_shift {
if!self.items().is_selected(idx) {
self.items().set_selected_all(false)?;
self.items().set_selected(true, &[idx])?;
}
self.items().set_focused(idx)?;
}
},
None => { // no item was right-clicked
self.items().set_selected_all(false)?;
},
}
self.hwnd().SetFocus(); // because a right-click won't set the focus by itself
menu_pos
} else { // usually fired by the context meny key
let focused_idx_opt = self.items().focused();
if focused_idx_opt.is_some() && self.items().is_visible(focused_idx_opt.unwrap()) {
let focused_idx = focused_idx_opt.unwrap();
let rc_item = self.items().rect(focused_idx, co::LVIR::BOUNDS)?;
POINT::new(rc_item.left + 16,
rc_item.top + (rc_item.bottom - rc_item.top) / 2)
} else { // no item is focused and visible
POINT::new(6, 10) // arbitrary
}
};
hmenu.TrackPopupMenuAtPoint(
menu_pos, self.hwnd().GetParent()?, self.hwnd())
}
}
//------------------------------------------------------------------------------
/// Options to create a [`ListView`](crate::gui::ListView) programmatically with
/// [`ListView::new`](crate::gui::ListView::new).
pub struct ListViewOpts {
/// Control position within parent client area, in pixels, to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Will be adjusted to match current system DPI.
///
/// Defaults to 0 x 0.
pub position: POINT,
/// Control size, in pixels, to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Will be adjusted to match current system DPI.
///
/// Defaults to 50 x 50.
pub size: SIZE,
/// List view styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `LVS::REPORT | LVS::NOSORTHEADER | LVS::SHOWSELALWAYS | LVS::SHAREIMAGELISTS`.
pub list_view_style: co::LVS,
/// Extended list view styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `LVS_EX::NoValue`.
pub list_view_ex_style: co::LVS_EX,
/// Window styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `WS::CHILD | WS::VISIBLE | WS::TABSTOP | WS::GROUP`.
pub window_style: co::WS,
/// Extended window styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `WS_EX::LEFT | WS_EX::CLIENTEDGE`.
pub window_ex_style: co::WS_EX,
/// The control ID.
///
/// Defaults to an auto-generated ID.
pub ctrl_id: u16,
/// Context popup menu.
///
/// This menu is shared: it must be destroyed manually after the control is
/// destroyed. But note that menus loaded from resources don't need to be
/// destroyed.
///
/// Defaults to `None`.
pub context_menu: Option<HMENU>,
/// Text and width of columns to be added right away. The columns only show
/// in report mode.
///
/// Defaults to none.
pub columns: Vec<(String, u32)>,
}
impl Default for ListViewOpts {
fn default() -> Self {
Self {
position: POINT::new(0, 0),
size: SIZE::new(50, 50),
list_view_style: co::LVS::REPORT | co::LVS::NOSORTHEADER | co::LVS::SHOWSELALWAYS | co::LVS::SHAREIMAGELISTS,
list_view_ex_style: co::LVS_EX::NoValue,
window_style: co::WS::CHILD | co::WS::VISIBLE | co::WS::TABSTOP | co::WS::GROUP,
window_ex_style: co::WS_EX::LEFT | co::WS_EX::CLIENTEDGE,
ctrl_id: 0,
context_menu: None,
columns: Vec::default(),
}
}
}
impl ListViewOpts {
fn define_ctrl_id(mut self) -> Self {
if self.ctrl_id == 0 {
self.ctrl_id = auto_ctrl_id();
}
self
}
}
| toggle_extended_style | identifier_name |
list_view.rs | use std::sync::Arc;
use crate::aliases::WinResult;
use crate::co;
use crate::funcs::{GetAsyncKeyState, GetCursorPos, PostQuitMessage};
use crate::gui::base::Base;
use crate::gui::events::ListViewEvents;
use crate::gui::native_controls::list_view_columns::ListViewColumns;
use crate::gui::native_controls::list_view_items::ListViewItems;
use crate::gui::native_controls::base_native_control::{BaseNativeControl, OptsId};
use crate::gui::privs::{auto_ctrl_id, multiply_dpi};
use crate::gui::traits::{baseref_from_parent, Parent};
use crate::handles::{HIMAGELIST, HMENU, HWND};
use crate::msg::lvm;
use crate::structs::{LVHITTESTINFO, NMITEMACTIVATE, NMLVKEYDOWN, POINT, SIZE};
/// Native
/// [list view](https://docs.microsoft.com/en-us/windows/win32/controls/list-view-controls-overview)
/// control. Not to be confused with the simpler [list box](crate::gui::ListBox)
/// control.
///
/// Implements [`Child`](crate::gui::Child) trait.
#[derive(Clone)]
pub struct ListView(Arc<Obj>);
struct Obj { // actual fields of ListView
base: BaseNativeControl,
opts_id: OptsId<ListViewOpts>,
events: ListViewEvents,
columns: ListViewColumns,
items: ListViewItems,
context_menu: Option<HMENU>,
}
impl_send_sync_child!(ListView);
impl ListView {
/// Instantiates a new `ListView` object, to be created on the parent window
/// with [`HWND::CreateWindowEx`](crate::HWND::CreateWindowEx).
pub fn new(parent: &dyn Parent, opts: ListViewOpts) -> ListView {
let parent_base_ref = baseref_from_parent(parent);
let opts = ListViewOpts::define_ctrl_id(opts);
let ctrl_id = opts.ctrl_id;
let context_menu = opts.context_menu;
let new_self = Self(
Arc::new(
Obj {
base: BaseNativeControl::new(parent_base_ref),
opts_id: OptsId::Wnd(opts),
events: ListViewEvents::new(parent_base_ref, ctrl_id),
columns: ListViewColumns::new(),
items: ListViewItems::new(),
context_menu,
},
),
);
new_self.0.columns.set_hwnd_ref(new_self.0.base.hwnd_ref());
new_self.0.items.set_hwnd_ref(new_self.0.base.hwnd_ref());
parent_base_ref.privileged_events_ref().wm(parent_base_ref.creation_wm(), {
let me = new_self.clone();
move |_| { me.create(); 0 }
});
new_self.handled_events(parent_base_ref, ctrl_id);
new_self
}
/// Instantiates a new `ListView` object, to be loaded from a dialog
/// resource with [`HWND::GetDlgItem`](crate::HWND::GetDlgItem).
///
/// **Note:** The optional `context_menu` is shared: it must be destroyed
/// manually after the control is destroyed. But note that menus loaded from
/// resources don't need to be destroyed.
pub fn new_dlg(
parent: &dyn Parent,
ctrl_id: u16,
context_menu: Option<HMENU>) -> ListView
{
let parent_base_ref = baseref_from_parent(parent);
let new_self = Self(
Arc::new(
Obj {
base: BaseNativeControl::new(parent_base_ref),
opts_id: OptsId::Dlg(ctrl_id),
events: ListViewEvents::new(parent_base_ref, ctrl_id),
columns: ListViewColumns::new(),
items: ListViewItems::new(),
context_menu,
},
),
);
new_self.0.columns.set_hwnd_ref(new_self.0.base.hwnd_ref());
new_self.0.items.set_hwnd_ref(new_self.0.base.hwnd_ref());
parent_base_ref.privileged_events_ref().wm_init_dialog({
let me = new_self.clone();
move |_| { me.create(); true }
});
new_self.handled_events(parent_base_ref, ctrl_id);
new_self
}
fn create(&self) {
|| -> WinResult<()> {
match &self.0.opts_id {
OptsId::Wnd(opts) => {
let mut pos = opts.position;
let mut sz = opts.size;
multiply_dpi(Some(&mut pos), Some(&mut sz))?;
self.0.base.create_window( // may panic
"SysListView32", None, pos, sz,
opts.ctrl_id,
opts.window_ex_style,
opts.window_style | opts.list_view_style.into(),
)?;
if opts.list_view_ex_style!= co::LVS_EX::NoValue {
self.toggle_extended_style(true, opts.list_view_ex_style);
}
self.columns().add(&opts.columns)?;
Ok(())
},
OptsId::Dlg(ctrl_id) => self.0.base.create_dlg(*ctrl_id).map(|_| ()), // may panic
}
}().unwrap_or_else(|err| PostQuitMessage(err))
}
fn handled_events(&self, parent_base_ref: &Base, ctrl_id: u16) {
parent_base_ref.privileged_events_ref().add_nfy(ctrl_id, co::LVN::KEYDOWN.into(), {
let me = self.clone();
move |p| {
let lvnk = unsafe { p.cast_nmhdr::<NMLVKEYDOWN>() };
let has_ctrl = GetAsyncKeyState(co::VK::CONTROL);
let has_shift = GetAsyncKeyState(co::VK::SHIFT);
if has_ctrl && lvnk.wVKey == co::VK('A' as _) { // Ctrl+A
me.items().set_selected_all(true)
.unwrap_or_else(|err| PostQuitMessage(err));
} else if lvnk.wVKey == co::VK::APPS { // context menu key
me.show_context_menu(false, has_ctrl, has_shift).unwrap();
}
None
}
});
parent_base_ref.privileged_events_ref().add_nfy(ctrl_id, co::NM::RCLICK.into(), {
let me = self.clone();
move |p| {
let nmia = unsafe { p.cast_nmhdr::<NMITEMACTIVATE>() };
let has_ctrl = nmia.uKeyFlags.has(co::LVKF::CONTROL);
let has_shift = nmia.uKeyFlags.has(co::LVKF::SHIFT);
me.show_context_menu(true, has_ctrl, has_shift).unwrap();
None
}
});
}
pub_fn_ctrlid_hwnd_on_onsubclass!(ListViewEvents);
/// Exposes the column methods.
pub fn columns(&self) -> &ListViewColumns {
&self.0.columns
}
/// Returns the context menu attached to this list view, if any.
///
/// The context menu is attached when the list view is created, either by
/// calling [`ListView::new`](crate::gui::ListView::new) or
/// [`ListView::new_dlg`](crate::gui::ListView::new_dlg).
pub fn context_menu(&self) -> Option<HMENU> {
self.0.context_menu
}
/// Retrieves one of the associated image lists by sending an
/// [`LVM_GETIMAGELIST`](crate::msg::lvm::GetImageList) message.
pub fn image_list(&self, kind: co::LVSIL) -> Option<HIMAGELIST> {
self.hwnd().SendMessage(lvm::GetImageList { kind })
}
/// Exposes the item methods.
pub fn items(&self) -> &ListViewItems {
&self.0.items
}
/// Retrieves the current view by sending an
/// [`LVM_GETVIEW`](crate::msg::lvm::GetView) message.
pub fn current_view(&self) -> co::LV_VIEW {
self.hwnd().SendMessage(lvm::GetView {})
}
/// Sets the current view by sending an
/// [`LVM_SETVIEW`](crate::msg::lvm::SetView) message.
pub fn set_current_view(&self, view: co::LV_VIEW) -> WinResult<()> {
self.hwnd().SendMessage(lvm::SetView { view })
}
/// Sets the one of the associated image lists by sending an
/// [`LVM_SETIMAGELIST`](crate::msg::lvm::SetImageList) message.
///
/// Returns the previous image list, if any.
pub fn set_image_list(&self,
kind: co::LVSIL, himagelist: HIMAGELIST) -> Option<HIMAGELIST>
{
self.hwnd().SendMessage(lvm::SetImageList { kind, himagelist })
}
/// Toggles the given extended list view styles by sending an
/// [`LVM_SETEXTENDEDLISTVIEWSTYLE`](crate::msg::lvm::SetExtendedListViewStyle)
/// message.
pub fn toggle_extended_style(&self, set: bool, ex_style: co::LVS_EX) {
self.hwnd().SendMessage(lvm::SetExtendedListViewStyle {
mask: ex_style,
style: if set { ex_style } else { co::LVS_EX::NoValue },
});
}
fn show_context_menu(&self,
follow_cursor: bool, has_ctrl: bool, has_shift: bool) -> WinResult<()>
{
let hmenu = match self.0.context_menu {
Some(h) => h,
None => return Ok(()), // no menu, nothing to do
};
let menu_pos = if follow_cursor { // usually when fired by a right-click
let mut menu_pos = GetCursorPos()?; // relative to screen
self.hwnd().ScreenToClient(&mut menu_pos)?; // now relative to list view
let mut lvhti = LVHITTESTINFO::default(); // find item below cursor, if any
lvhti.pt = menu_pos;
match self.items().hit_test(&mut lvhti) {
Some(idx) => { // an item was right-clicked
if!has_ctrl &&!has_shift {
if!self.items().is_selected(idx) {
self.items().set_selected_all(false)?;
self.items().set_selected(true, &[idx])?;
}
self.items().set_focused(idx)?;
}
},
None => { // no item was right-clicked
self.items().set_selected_all(false)?;
},
}
self.hwnd().SetFocus(); // because a right-click won't set the focus by itself
menu_pos
} else { // usually fired by the context meny key
let focused_idx_opt = self.items().focused();
if focused_idx_opt.is_some() && self.items().is_visible(focused_idx_opt.unwrap()) {
let focused_idx = focused_idx_opt.unwrap();
let rc_item = self.items().rect(focused_idx, co::LVIR::BOUNDS)?;
POINT::new(rc_item.left + 16,
rc_item.top + (rc_item.bottom - rc_item.top) / 2)
} else { // no item is focused and visible
POINT::new(6, 10) // arbitrary
}
};
hmenu.TrackPopupMenuAtPoint(
menu_pos, self.hwnd().GetParent()?, self.hwnd())
}
}
//------------------------------------------------------------------------------
/// Options to create a [`ListView`](crate::gui::ListView) programmatically with
/// [`ListView::new`](crate::gui::ListView::new).
pub struct ListViewOpts {
/// Control position within parent client area, in pixels, to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Will be adjusted to match current system DPI.
///
/// Defaults to 0 x 0.
pub position: POINT,
/// Control size, in pixels, to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Will be adjusted to match current system DPI.
///
/// Defaults to 50 x 50.
pub size: SIZE,
/// List view styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `LVS::REPORT | LVS::NOSORTHEADER | LVS::SHOWSELALWAYS | LVS::SHAREIMAGELISTS`.
pub list_view_style: co::LVS,
/// Extended list view styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `LVS_EX::NoValue`.
pub list_view_ex_style: co::LVS_EX,
/// Window styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `WS::CHILD | WS::VISIBLE | WS::TABSTOP | WS::GROUP`.
pub window_style: co::WS,
/// Extended window styles to be
/// [created](https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-createwindowexw).
///
/// Defaults to `WS_EX::LEFT | WS_EX::CLIENTEDGE`.
pub window_ex_style: co::WS_EX,
/// The control ID.
///
/// Defaults to an auto-generated ID.
pub ctrl_id: u16,
/// Context popup menu.
///
/// This menu is shared: it must be destroyed manually after the control is
/// destroyed. But note that menus loaded from resources don't need to be
/// destroyed.
///
/// Defaults to `None`.
pub context_menu: Option<HMENU>,
/// Text and width of columns to be added right away. The columns only show
/// in report mode.
///
/// Defaults to none.
pub columns: Vec<(String, u32)>,
}
impl Default for ListViewOpts {
fn default() -> Self {
Self {
position: POINT::new(0, 0),
size: SIZE::new(50, 50),
list_view_style: co::LVS::REPORT | co::LVS::NOSORTHEADER | co::LVS::SHOWSELALWAYS | co::LVS::SHAREIMAGELISTS,
list_view_ex_style: co::LVS_EX::NoValue,
window_style: co::WS::CHILD | co::WS::VISIBLE | co::WS::TABSTOP | co::WS::GROUP,
window_ex_style: co::WS_EX::LEFT | co::WS_EX::CLIENTEDGE,
ctrl_id: 0,
context_menu: None,
columns: Vec::default(),
}
}
}
impl ListViewOpts {
fn define_ctrl_id(mut self) -> Self {
if self.ctrl_id == 0 |
self
}
}
| {
self.ctrl_id = auto_ctrl_id();
} | conditional_block |
main.rs | //
extern crate bio;
extern crate itertools;
use std::collections::HashMap;
use std::cmp;
use std::env;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use bio::io::fastq;
use bio::alignment::pairwise::*;
use bio::alignment::AlignmentOperation;
// fn check(rec: &fastq::Record, read: &str) -> (u16, Vec<(usize, char, char)>) {
// let mut distance : u16 = 0;
// let qual = rec.qual();
// let mut dif : Vec<(usize, char, char)> = vec![];
// let mut index : usize = 0;
// for (i, j) in String::from_utf8_lossy(rec.seq()).chars().dropping(8).zip(read.chars()) {
// if qual[index] > 63 {
// if i!= j {
// dif.push((index, i, j));
// distance += 1;
// }
// }
// else {
// distance += 1;
// }
// index += 1;
// }
// (distance, dif)
// }
fn hamming(seq1: &str, seq2: &str) -> u32 {
let mut score = 0;
for (i, j) in seq1.chars().zip(seq2.chars()) {
if i!= j {
score += 1;
}
}
score
}
fn ham_mutations(seq1: &str, seq2: &str) -> (u32, String) {
let mut score = 0;
let mut mutations = "".to_string();
let mut n = 1;
for (i, j) in seq1.chars().zip(seq2.chars()) {
if i!= j {
score += 1;
if score == 1 {
mutations = mutations + &format!("{}{}", n, i);
} else {
mutations = mutations + &format!(" {}{}", n, i);
}
}
n += 1;
}
(score, mutations)
}
fn reverse_complement(seq: &str) -> String {
seq.chars()
.map(|t| match t {
'A' => 'T',
'T' => 'A',
'G' => 'C',
'C' => 'G',
_ => 'N',
}).rev().collect::<String>()
}
fn qual_check(a: &[u8], b: &[u8]) -> bool {
for (i, j) in a.iter().zip(b.iter()) {
if i < j {
continue;
}
return false;
}
return true
}
fn data_stat(results: &HashMap<String, (String, Vec<u8>)>, output_file: &str) -> Result<String, Box<Error>> {
// statistics on the datasets
let wt_pac = "AGAGAAGATTTATCTGAAGTCGTTACGCGAG";
let mut diff_counts : [usize; 31] = [0; 31];
let mut diff_freq : [usize; 31] = [0; 31];
let mut output = try!(File::create(output_file));
let mut pac_stat = HashMap::new();
for (_, pac_info) in results {
let ref pac = pac_info.0;
let ref qual = pac_info.1;
// mutation statistics
let mut index = 0;
let mut distance = 0;
for (i, j) in pac.chars().zip(wt_pac.chars()) {
if qual[index] > 63 && i!= j {
diff_freq[index] += 1;
distance += 1;
}
index += 1;
}
diff_counts[distance] += 1;
if distance > 8 {
println!("# {} {}", distance, pac);
}
// pac sites statistics
if pac_stat.contains_key(pac) {
*pac_stat.get_mut(pac).unwrap() += 1;
}
else {
pac_stat.insert(pac, 1);
}
}
println!("# Overall statistics:");
for i in 0..31 {
println!("# {}\t{}", i, diff_counts[i]);
}
println!("# Per-base statistics:");
for i in 0..31 {
println!("# {}\t{}", i, diff_freq[i]);
}
//try!(write!(output, "{}", "# pac counts:\n"));
for (pac, counts) in &pac_stat {
try!(write!(output, "{} {} {}\n", pac, hamming(&pac, &wt_pac), counts));
}
Ok("Done".into())
}
fn | () {
let args : Vec<String> = env::args().collect();
let file1 = fastq::Reader::from_file(&args[1]).unwrap();
let file2 = fastq::Reader::from_file(&args[2]).unwrap();
let mut num_records = 0;
let mut num_duplicates = 0;
let mut num_qual_skip = 0;
let mut results : HashMap<String, (String, Vec<u8>)>= HashMap::new();
let wt_read1 = if &args[3] == "M" {b"ACTAAGTGAGATGAATATGGCGGCACCAAAGGGCAACCGATTTTGGGAGGCCCGCAGTAGTCATGGGCGAAATCCTAAATTCGAATCGCCTGAGGCGCTGTGGGCTGCTTGTTGTGAA"}
else {b"AAGTGAGATGAATATGGCGGCACCAAAGGGCAACCGATTTTGGGAGGCCCGCAGTAGTCATGGGCGAAATCCTAAATTCGAATCGCCTGAGGCGCTGTGGGCTGCTTGTTGTGAATAC"};
for (record1, record2) in file1.records().zip(file2.records()) {
// take read1, filter low quality reads
let read1 = record1.unwrap();
let desc = read1.id().unwrap().split(":").skip(5).collect::<Vec<&str>>();
let description = desc[0].to_string() + ":" + desc[1];
let mut trim = 124;
let mut am = " ".to_string();
for i in 0..120 {
if qual_check(&read1.qual()[i.. i+5], &[63, 63, 63, 63, 63]) {
trim = i+1;
println!("# {} {}: Read 1 trimmed at {}.", num_records, description, trim);
break;
}
}
if trim < 18 {
println!("# {}: Useful read too short. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
// check if the read is the right read
let seq1 = String::from_utf8_lossy(&read1.seq()[0.. trim]);
let score = |a: u8, b: u8| if a == b {1i32} else {-1i32};
let mut aligner = Aligner::with_capacity(seq1.len(), wt_read1.len(), -5, -1, &score);
let alignment = aligner.global(&seq1[8..seq1.len()].as_bytes(), wt_read1);
if alignment.score < (2 * trim as i32 - 133 - 30) {
println!("# {} {}: wrong read 1 skipping", num_records, description);
println!("# {} {}", &seq1[8..seq1.len()], alignment.score);
num_records += 1;
num_qual_skip += 1;
continue;
}
// identifying AM/WT
if &args[3] == "M" {
if trim < 33 {
println!("# {}: Useful read too short for M. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
// Allowing 1 mismatch
if hamming(&seq1[27.. 32], "GCGGC") < 2 {
match &seq1[32.. 33] {
"A" => am = "WT".to_string(),
"G" => am = "AM".to_string(),
_ => am = " ".to_string(),
}
println!("# 1 am_codon = {}", &seq1[27.. 33]);
}
if am == " " {
for i in 0.. trim-6 {
if &seq1[i.. i+5] == "GCGGC" {
match &seq1[i+5.. i+6] {
"A" => am = "WT".to_string(),
"G" => am = "AM".to_string(),
_ => am = " ".to_string(),
}
println!("# 2 am_codon = {}", &seq1[i.. i+6]);
break;
}
}
}
}
// average quality filtering
//let avg_qual = read1.qual().iter().fold(0, |a, &b| a as u32 + b as u32);
//if avg_qual < (125 * 30) { // corresponding to an average quality of 20
// println!("# low quality read 1 skipping: {}", avg_qual);
// continue;
//}
// now deal with read2
let read2 = record2.unwrap();
// average quality filtering
//let avg_qual = read2.qual().iter().fold(0, |a, &b| a as u32 + b as u32);
//if avg_qual < 125*30 {
// println!("# {}: low quality read 2 skipping: {}", num_records, avg_qual);
// num_qual_skip += 1;
// continue;
//}
trim = 124;
for i in 0..119 {
if qual_check(&read2.qual()[i.. i+5], &[63, 63, 63, 63, 63]) {
trim = i+1;
println!("# {} {}: Read 2 trimmed at {}.", num_records, description, i);
break;
}
}
if trim < 80 {
println!("# {}: Useful read too short. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
let seq2 =String::from_utf8_lossy(&read2.seq()[0.. trim]);
// extract barcodes
let bc1 = &seq1[0..8];
let bc2 = &seq2[0..8];
let bc = bc1.to_string() + bc2;
// check the pac sequences
let wt_pac = "AGAGAAGATTTATCTGAAGTCGTTACGCGAG";
let seq2_rc = reverse_complement(&seq2);
let qual : Vec<u8> = read2.qual().iter().cloned().rev().collect();
let mut pac_start = 0;
let mut min_score = 31;
for i in 0.. trim - 31 {
let score = hamming(&seq2_rc[i.. i+31], &wt_pac);
if score < min_score {
min_score = score;
pac_start = i;
}
}
let pac_end = cmp::min(trim, pac_start+31);
if pac_end - pac_start < 25 {
println!("# {} {}: pac too short ({}).", num_records, description, pac_end-pac_start);
num_records += 1;
num_qual_skip += 1;
continue;
}
let pac = String::from_utf8_lossy(&seq2_rc[pac_start.. pac_end]
.as_bytes()).into_owned();
if min_score > 4 {
let mut aligner = Aligner::with_capacity(wt_pac.len(), seq2.len(), -1, -1, &score);
let alignment = aligner.local(wt_pac.as_bytes(), &seq2_rc.as_bytes());
if alignment.operations.iter().any(|&x| x == AlignmentOperation::Ins || x == AlignmentOperation::Del) {
println!("# {} {}: pac contain indels.", num_records, description);
println!("{}", alignment.pretty(wt_pac.as_bytes(), &seq2_rc.as_bytes() ));
num_records += 1;
num_qual_skip += 1;
continue;
}
}
let pac_qual_avg : f32 = qual[pac_start.. pac_end].iter().cloned().map(|x| x as f32).sum::<f32>() / (pac_end - pac_start) as f32;
if pac_qual_avg < 63.0 || pac.chars().any(|x| x == 'N') {
println!("# {} {}: pac quality too low ({}) or contains N.", num_records, description, pac_qual_avg);
if &args[3] == "M" {
println!("# {} {} {} {}", num_records, bc, pac, am);
} else {
println!("# {} {} {}", num_records, bc, pac);
}
num_records += 1;
num_qual_skip += 1;
continue;
}
if &args[3] == "M" {
let ham_mut = ham_mutations(&pac, &wt_pac);
println!("{},{},{},{},{},{}", num_records, bc, pac, am, ham_mut.0, ham_mut.1);
} else {
println!("{} {} {}", num_records, bc, pac);
}
if results.contains_key(&bc) {
if results[&bc].0 == pac {
println!("# {}: duplicate found", num_records);
num_duplicates += 1;
}
else {
println!("# {}: possible sequencing error? {} {}", num_records, &pac, results[&bc].0);
}
}
else {
results.insert(bc, (pac, qual.clone()));
}
num_records += 1;
}
println!("# {} records processed;", num_records);
println!("# {} low quality reads;", num_qual_skip);
println!("# {} possible duplicates.", num_duplicates);
data_stat(&results, &args[4]);
}
| main | identifier_name |
main.rs | //
extern crate bio;
extern crate itertools;
use std::collections::HashMap;
use std::cmp;
use std::env;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use bio::io::fastq;
use bio::alignment::pairwise::*;
use bio::alignment::AlignmentOperation;
// fn check(rec: &fastq::Record, read: &str) -> (u16, Vec<(usize, char, char)>) {
// let mut distance : u16 = 0;
// let qual = rec.qual();
// let mut dif : Vec<(usize, char, char)> = vec![];
// let mut index : usize = 0;
// for (i, j) in String::from_utf8_lossy(rec.seq()).chars().dropping(8).zip(read.chars()) {
// if qual[index] > 63 { | // distance += 1;
// }
// }
// else {
// distance += 1;
// }
// index += 1;
// }
// (distance, dif)
// }
fn hamming(seq1: &str, seq2: &str) -> u32 {
let mut score = 0;
for (i, j) in seq1.chars().zip(seq2.chars()) {
if i!= j {
score += 1;
}
}
score
}
fn ham_mutations(seq1: &str, seq2: &str) -> (u32, String) {
let mut score = 0;
let mut mutations = "".to_string();
let mut n = 1;
for (i, j) in seq1.chars().zip(seq2.chars()) {
if i!= j {
score += 1;
if score == 1 {
mutations = mutations + &format!("{}{}", n, i);
} else {
mutations = mutations + &format!(" {}{}", n, i);
}
}
n += 1;
}
(score, mutations)
}
fn reverse_complement(seq: &str) -> String {
seq.chars()
.map(|t| match t {
'A' => 'T',
'T' => 'A',
'G' => 'C',
'C' => 'G',
_ => 'N',
}).rev().collect::<String>()
}
fn qual_check(a: &[u8], b: &[u8]) -> bool {
for (i, j) in a.iter().zip(b.iter()) {
if i < j {
continue;
}
return false;
}
return true
}
fn data_stat(results: &HashMap<String, (String, Vec<u8>)>, output_file: &str) -> Result<String, Box<Error>> {
// statistics on the datasets
let wt_pac = "AGAGAAGATTTATCTGAAGTCGTTACGCGAG";
let mut diff_counts : [usize; 31] = [0; 31];
let mut diff_freq : [usize; 31] = [0; 31];
let mut output = try!(File::create(output_file));
let mut pac_stat = HashMap::new();
for (_, pac_info) in results {
let ref pac = pac_info.0;
let ref qual = pac_info.1;
// mutation statistics
let mut index = 0;
let mut distance = 0;
for (i, j) in pac.chars().zip(wt_pac.chars()) {
if qual[index] > 63 && i!= j {
diff_freq[index] += 1;
distance += 1;
}
index += 1;
}
diff_counts[distance] += 1;
if distance > 8 {
println!("# {} {}", distance, pac);
}
// pac sites statistics
if pac_stat.contains_key(pac) {
*pac_stat.get_mut(pac).unwrap() += 1;
}
else {
pac_stat.insert(pac, 1);
}
}
println!("# Overall statistics:");
for i in 0..31 {
println!("# {}\t{}", i, diff_counts[i]);
}
println!("# Per-base statistics:");
for i in 0..31 {
println!("# {}\t{}", i, diff_freq[i]);
}
//try!(write!(output, "{}", "# pac counts:\n"));
for (pac, counts) in &pac_stat {
try!(write!(output, "{} {} {}\n", pac, hamming(&pac, &wt_pac), counts));
}
Ok("Done".into())
}
fn main() {
let args : Vec<String> = env::args().collect();
let file1 = fastq::Reader::from_file(&args[1]).unwrap();
let file2 = fastq::Reader::from_file(&args[2]).unwrap();
let mut num_records = 0;
let mut num_duplicates = 0;
let mut num_qual_skip = 0;
let mut results : HashMap<String, (String, Vec<u8>)>= HashMap::new();
let wt_read1 = if &args[3] == "M" {b"ACTAAGTGAGATGAATATGGCGGCACCAAAGGGCAACCGATTTTGGGAGGCCCGCAGTAGTCATGGGCGAAATCCTAAATTCGAATCGCCTGAGGCGCTGTGGGCTGCTTGTTGTGAA"}
else {b"AAGTGAGATGAATATGGCGGCACCAAAGGGCAACCGATTTTGGGAGGCCCGCAGTAGTCATGGGCGAAATCCTAAATTCGAATCGCCTGAGGCGCTGTGGGCTGCTTGTTGTGAATAC"};
for (record1, record2) in file1.records().zip(file2.records()) {
// take read1, filter low quality reads
let read1 = record1.unwrap();
let desc = read1.id().unwrap().split(":").skip(5).collect::<Vec<&str>>();
let description = desc[0].to_string() + ":" + desc[1];
let mut trim = 124;
let mut am = " ".to_string();
for i in 0..120 {
if qual_check(&read1.qual()[i.. i+5], &[63, 63, 63, 63, 63]) {
trim = i+1;
println!("# {} {}: Read 1 trimmed at {}.", num_records, description, trim);
break;
}
}
if trim < 18 {
println!("# {}: Useful read too short. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
// check if the read is the right read
let seq1 = String::from_utf8_lossy(&read1.seq()[0.. trim]);
let score = |a: u8, b: u8| if a == b {1i32} else {-1i32};
let mut aligner = Aligner::with_capacity(seq1.len(), wt_read1.len(), -5, -1, &score);
let alignment = aligner.global(&seq1[8..seq1.len()].as_bytes(), wt_read1);
if alignment.score < (2 * trim as i32 - 133 - 30) {
println!("# {} {}: wrong read 1 skipping", num_records, description);
println!("# {} {}", &seq1[8..seq1.len()], alignment.score);
num_records += 1;
num_qual_skip += 1;
continue;
}
// identifying AM/WT
if &args[3] == "M" {
if trim < 33 {
println!("# {}: Useful read too short for M. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
// Allowing 1 mismatch
if hamming(&seq1[27.. 32], "GCGGC") < 2 {
match &seq1[32.. 33] {
"A" => am = "WT".to_string(),
"G" => am = "AM".to_string(),
_ => am = " ".to_string(),
}
println!("# 1 am_codon = {}", &seq1[27.. 33]);
}
if am == " " {
for i in 0.. trim-6 {
if &seq1[i.. i+5] == "GCGGC" {
match &seq1[i+5.. i+6] {
"A" => am = "WT".to_string(),
"G" => am = "AM".to_string(),
_ => am = " ".to_string(),
}
println!("# 2 am_codon = {}", &seq1[i.. i+6]);
break;
}
}
}
}
// average quality filtering
//let avg_qual = read1.qual().iter().fold(0, |a, &b| a as u32 + b as u32);
//if avg_qual < (125 * 30) { // corresponding to an average quality of 20
// println!("# low quality read 1 skipping: {}", avg_qual);
// continue;
//}
// now deal with read2
let read2 = record2.unwrap();
// average quality filtering
//let avg_qual = read2.qual().iter().fold(0, |a, &b| a as u32 + b as u32);
//if avg_qual < 125*30 {
// println!("# {}: low quality read 2 skipping: {}", num_records, avg_qual);
// num_qual_skip += 1;
// continue;
//}
trim = 124;
for i in 0..119 {
if qual_check(&read2.qual()[i.. i+5], &[63, 63, 63, 63, 63]) {
trim = i+1;
println!("# {} {}: Read 2 trimmed at {}.", num_records, description, i);
break;
}
}
if trim < 80 {
println!("# {}: Useful read too short. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
let seq2 =String::from_utf8_lossy(&read2.seq()[0.. trim]);
// extract barcodes
let bc1 = &seq1[0..8];
let bc2 = &seq2[0..8];
let bc = bc1.to_string() + bc2;
// check the pac sequences
let wt_pac = "AGAGAAGATTTATCTGAAGTCGTTACGCGAG";
let seq2_rc = reverse_complement(&seq2);
let qual : Vec<u8> = read2.qual().iter().cloned().rev().collect();
let mut pac_start = 0;
let mut min_score = 31;
for i in 0.. trim - 31 {
let score = hamming(&seq2_rc[i.. i+31], &wt_pac);
if score < min_score {
min_score = score;
pac_start = i;
}
}
let pac_end = cmp::min(trim, pac_start+31);
if pac_end - pac_start < 25 {
println!("# {} {}: pac too short ({}).", num_records, description, pac_end-pac_start);
num_records += 1;
num_qual_skip += 1;
continue;
}
let pac = String::from_utf8_lossy(&seq2_rc[pac_start.. pac_end]
.as_bytes()).into_owned();
if min_score > 4 {
let mut aligner = Aligner::with_capacity(wt_pac.len(), seq2.len(), -1, -1, &score);
let alignment = aligner.local(wt_pac.as_bytes(), &seq2_rc.as_bytes());
if alignment.operations.iter().any(|&x| x == AlignmentOperation::Ins || x == AlignmentOperation::Del) {
println!("# {} {}: pac contain indels.", num_records, description);
println!("{}", alignment.pretty(wt_pac.as_bytes(), &seq2_rc.as_bytes() ));
num_records += 1;
num_qual_skip += 1;
continue;
}
}
let pac_qual_avg : f32 = qual[pac_start.. pac_end].iter().cloned().map(|x| x as f32).sum::<f32>() / (pac_end - pac_start) as f32;
if pac_qual_avg < 63.0 || pac.chars().any(|x| x == 'N') {
println!("# {} {}: pac quality too low ({}) or contains N.", num_records, description, pac_qual_avg);
if &args[3] == "M" {
println!("# {} {} {} {}", num_records, bc, pac, am);
} else {
println!("# {} {} {}", num_records, bc, pac);
}
num_records += 1;
num_qual_skip += 1;
continue;
}
if &args[3] == "M" {
let ham_mut = ham_mutations(&pac, &wt_pac);
println!("{},{},{},{},{},{}", num_records, bc, pac, am, ham_mut.0, ham_mut.1);
} else {
println!("{} {} {}", num_records, bc, pac);
}
if results.contains_key(&bc) {
if results[&bc].0 == pac {
println!("# {}: duplicate found", num_records);
num_duplicates += 1;
}
else {
println!("# {}: possible sequencing error? {} {}", num_records, &pac, results[&bc].0);
}
}
else {
results.insert(bc, (pac, qual.clone()));
}
num_records += 1;
}
println!("# {} records processed;", num_records);
println!("# {} low quality reads;", num_qual_skip);
println!("# {} possible duplicates.", num_duplicates);
data_stat(&results, &args[4]);
} | // if i != j {
// dif.push((index, i, j)); | random_line_split |
main.rs | //
extern crate bio;
extern crate itertools;
use std::collections::HashMap;
use std::cmp;
use std::env;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use bio::io::fastq;
use bio::alignment::pairwise::*;
use bio::alignment::AlignmentOperation;
// fn check(rec: &fastq::Record, read: &str) -> (u16, Vec<(usize, char, char)>) {
// let mut distance : u16 = 0;
// let qual = rec.qual();
// let mut dif : Vec<(usize, char, char)> = vec![];
// let mut index : usize = 0;
// for (i, j) in String::from_utf8_lossy(rec.seq()).chars().dropping(8).zip(read.chars()) {
// if qual[index] > 63 {
// if i!= j {
// dif.push((index, i, j));
// distance += 1;
// }
// }
// else {
// distance += 1;
// }
// index += 1;
// }
// (distance, dif)
// }
fn hamming(seq1: &str, seq2: &str) -> u32 |
fn ham_mutations(seq1: &str, seq2: &str) -> (u32, String) {
let mut score = 0;
let mut mutations = "".to_string();
let mut n = 1;
for (i, j) in seq1.chars().zip(seq2.chars()) {
if i!= j {
score += 1;
if score == 1 {
mutations = mutations + &format!("{}{}", n, i);
} else {
mutations = mutations + &format!(" {}{}", n, i);
}
}
n += 1;
}
(score, mutations)
}
fn reverse_complement(seq: &str) -> String {
seq.chars()
.map(|t| match t {
'A' => 'T',
'T' => 'A',
'G' => 'C',
'C' => 'G',
_ => 'N',
}).rev().collect::<String>()
}
fn qual_check(a: &[u8], b: &[u8]) -> bool {
for (i, j) in a.iter().zip(b.iter()) {
if i < j {
continue;
}
return false;
}
return true
}
fn data_stat(results: &HashMap<String, (String, Vec<u8>)>, output_file: &str) -> Result<String, Box<Error>> {
// statistics on the datasets
let wt_pac = "AGAGAAGATTTATCTGAAGTCGTTACGCGAG";
let mut diff_counts : [usize; 31] = [0; 31];
let mut diff_freq : [usize; 31] = [0; 31];
let mut output = try!(File::create(output_file));
let mut pac_stat = HashMap::new();
for (_, pac_info) in results {
let ref pac = pac_info.0;
let ref qual = pac_info.1;
// mutation statistics
let mut index = 0;
let mut distance = 0;
for (i, j) in pac.chars().zip(wt_pac.chars()) {
if qual[index] > 63 && i!= j {
diff_freq[index] += 1;
distance += 1;
}
index += 1;
}
diff_counts[distance] += 1;
if distance > 8 {
println!("# {} {}", distance, pac);
}
// pac sites statistics
if pac_stat.contains_key(pac) {
*pac_stat.get_mut(pac).unwrap() += 1;
}
else {
pac_stat.insert(pac, 1);
}
}
println!("# Overall statistics:");
for i in 0..31 {
println!("# {}\t{}", i, diff_counts[i]);
}
println!("# Per-base statistics:");
for i in 0..31 {
println!("# {}\t{}", i, diff_freq[i]);
}
//try!(write!(output, "{}", "# pac counts:\n"));
for (pac, counts) in &pac_stat {
try!(write!(output, "{} {} {}\n", pac, hamming(&pac, &wt_pac), counts));
}
Ok("Done".into())
}
fn main() {
let args : Vec<String> = env::args().collect();
let file1 = fastq::Reader::from_file(&args[1]).unwrap();
let file2 = fastq::Reader::from_file(&args[2]).unwrap();
let mut num_records = 0;
let mut num_duplicates = 0;
let mut num_qual_skip = 0;
let mut results : HashMap<String, (String, Vec<u8>)>= HashMap::new();
let wt_read1 = if &args[3] == "M" {b"ACTAAGTGAGATGAATATGGCGGCACCAAAGGGCAACCGATTTTGGGAGGCCCGCAGTAGTCATGGGCGAAATCCTAAATTCGAATCGCCTGAGGCGCTGTGGGCTGCTTGTTGTGAA"}
else {b"AAGTGAGATGAATATGGCGGCACCAAAGGGCAACCGATTTTGGGAGGCCCGCAGTAGTCATGGGCGAAATCCTAAATTCGAATCGCCTGAGGCGCTGTGGGCTGCTTGTTGTGAATAC"};
for (record1, record2) in file1.records().zip(file2.records()) {
// take read1, filter low quality reads
let read1 = record1.unwrap();
let desc = read1.id().unwrap().split(":").skip(5).collect::<Vec<&str>>();
let description = desc[0].to_string() + ":" + desc[1];
let mut trim = 124;
let mut am = " ".to_string();
for i in 0..120 {
if qual_check(&read1.qual()[i.. i+5], &[63, 63, 63, 63, 63]) {
trim = i+1;
println!("# {} {}: Read 1 trimmed at {}.", num_records, description, trim);
break;
}
}
if trim < 18 {
println!("# {}: Useful read too short. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
// check if the read is the right read
let seq1 = String::from_utf8_lossy(&read1.seq()[0.. trim]);
let score = |a: u8, b: u8| if a == b {1i32} else {-1i32};
let mut aligner = Aligner::with_capacity(seq1.len(), wt_read1.len(), -5, -1, &score);
let alignment = aligner.global(&seq1[8..seq1.len()].as_bytes(), wt_read1);
if alignment.score < (2 * trim as i32 - 133 - 30) {
println!("# {} {}: wrong read 1 skipping", num_records, description);
println!("# {} {}", &seq1[8..seq1.len()], alignment.score);
num_records += 1;
num_qual_skip += 1;
continue;
}
// identifying AM/WT
if &args[3] == "M" {
if trim < 33 {
println!("# {}: Useful read too short for M. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
// Allowing 1 mismatch
if hamming(&seq1[27.. 32], "GCGGC") < 2 {
match &seq1[32.. 33] {
"A" => am = "WT".to_string(),
"G" => am = "AM".to_string(),
_ => am = " ".to_string(),
}
println!("# 1 am_codon = {}", &seq1[27.. 33]);
}
if am == " " {
for i in 0.. trim-6 {
if &seq1[i.. i+5] == "GCGGC" {
match &seq1[i+5.. i+6] {
"A" => am = "WT".to_string(),
"G" => am = "AM".to_string(),
_ => am = " ".to_string(),
}
println!("# 2 am_codon = {}", &seq1[i.. i+6]);
break;
}
}
}
}
// average quality filtering
//let avg_qual = read1.qual().iter().fold(0, |a, &b| a as u32 + b as u32);
//if avg_qual < (125 * 30) { // corresponding to an average quality of 20
// println!("# low quality read 1 skipping: {}", avg_qual);
// continue;
//}
// now deal with read2
let read2 = record2.unwrap();
// average quality filtering
//let avg_qual = read2.qual().iter().fold(0, |a, &b| a as u32 + b as u32);
//if avg_qual < 125*30 {
// println!("# {}: low quality read 2 skipping: {}", num_records, avg_qual);
// num_qual_skip += 1;
// continue;
//}
trim = 124;
for i in 0..119 {
if qual_check(&read2.qual()[i.. i+5], &[63, 63, 63, 63, 63]) {
trim = i+1;
println!("# {} {}: Read 2 trimmed at {}.", num_records, description, i);
break;
}
}
if trim < 80 {
println!("# {}: Useful read too short. Skipping. L = {}", num_records, trim);
num_qual_skip += 1;
num_records += 1;
continue;
}
let seq2 =String::from_utf8_lossy(&read2.seq()[0.. trim]);
// extract barcodes
let bc1 = &seq1[0..8];
let bc2 = &seq2[0..8];
let bc = bc1.to_string() + bc2;
// check the pac sequences
let wt_pac = "AGAGAAGATTTATCTGAAGTCGTTACGCGAG";
let seq2_rc = reverse_complement(&seq2);
let qual : Vec<u8> = read2.qual().iter().cloned().rev().collect();
let mut pac_start = 0;
let mut min_score = 31;
for i in 0.. trim - 31 {
let score = hamming(&seq2_rc[i.. i+31], &wt_pac);
if score < min_score {
min_score = score;
pac_start = i;
}
}
let pac_end = cmp::min(trim, pac_start+31);
if pac_end - pac_start < 25 {
println!("# {} {}: pac too short ({}).", num_records, description, pac_end-pac_start);
num_records += 1;
num_qual_skip += 1;
continue;
}
let pac = String::from_utf8_lossy(&seq2_rc[pac_start.. pac_end]
.as_bytes()).into_owned();
if min_score > 4 {
let mut aligner = Aligner::with_capacity(wt_pac.len(), seq2.len(), -1, -1, &score);
let alignment = aligner.local(wt_pac.as_bytes(), &seq2_rc.as_bytes());
if alignment.operations.iter().any(|&x| x == AlignmentOperation::Ins || x == AlignmentOperation::Del) {
println!("# {} {}: pac contain indels.", num_records, description);
println!("{}", alignment.pretty(wt_pac.as_bytes(), &seq2_rc.as_bytes() ));
num_records += 1;
num_qual_skip += 1;
continue;
}
}
let pac_qual_avg : f32 = qual[pac_start.. pac_end].iter().cloned().map(|x| x as f32).sum::<f32>() / (pac_end - pac_start) as f32;
if pac_qual_avg < 63.0 || pac.chars().any(|x| x == 'N') {
println!("# {} {}: pac quality too low ({}) or contains N.", num_records, description, pac_qual_avg);
if &args[3] == "M" {
println!("# {} {} {} {}", num_records, bc, pac, am);
} else {
println!("# {} {} {}", num_records, bc, pac);
}
num_records += 1;
num_qual_skip += 1;
continue;
}
if &args[3] == "M" {
let ham_mut = ham_mutations(&pac, &wt_pac);
println!("{},{},{},{},{},{}", num_records, bc, pac, am, ham_mut.0, ham_mut.1);
} else {
println!("{} {} {}", num_records, bc, pac);
}
if results.contains_key(&bc) {
if results[&bc].0 == pac {
println!("# {}: duplicate found", num_records);
num_duplicates += 1;
}
else {
println!("# {}: possible sequencing error? {} {}", num_records, &pac, results[&bc].0);
}
}
else {
results.insert(bc, (pac, qual.clone()));
}
num_records += 1;
}
println!("# {} records processed;", num_records);
println!("# {} low quality reads;", num_qual_skip);
println!("# {} possible duplicates.", num_duplicates);
data_stat(&results, &args[4]);
}
| {
let mut score = 0;
for (i, j) in seq1.chars().zip(seq2.chars()) {
if i != j {
score += 1;
}
}
score
} | identifier_body |
main.rs | #[macro_use]
extern crate microprofile;
//
use rand::{distributions as distr, distributions::Distribution};
use starframe::{
self as sf,
game::{self, Game},
graph, graphics as gx,
input::{Key, MouseButton},
math::{self as m, uv},
physics as phys,
};
mod mousegrab;
use mousegrab::MouseGrabber;
mod player;
mod recipes;
use recipes::Recipe;
fn main() {
microprofile::init!();
microprofile::set_enable_all_groups!(true);
let game = Game::init(
60,
winit::window::WindowBuilder::new()
.with_title("starframe test")
.with_inner_size(winit::dpi::LogicalSize {
width: 800.0,
height: 600.0,
}),
);
let state = State::init(&game.renderer.device);
game.run(state);
microprofile::shutdown!();
}
//
// Types
//
pub enum StateEnum {
Playing,
Paused,
}
pub struct State {
scene: Scene,
state: StateEnum,
graph: MyGraph,
player: player::PlayerController,
mouse_mode: MouseMode,
mouse_grabber: MouseGrabber,
physics: phys::Physics,
camera: gx::camera::MouseDragCamera,
shape_renderer: gx::ShapeRenderer,
}
impl State {
fn init(device: &wgpu::Device) -> Self {
State {
scene: Scene::default(),
state: StateEnum::Playing,
graph: MyGraph::new(),
player: player::PlayerController::new(),
mouse_mode: MouseMode::Grab,
mouse_grabber: MouseGrabber::new(),
physics: phys::Physics::with_substeps(10),
camera: gx::camera::MouseDragCamera::new(
gx::camera::ScalingStrategy::ConstantDisplayArea {
width: 20.0,
height: 10.0,
},
),
shape_renderer: gx::ShapeRenderer::new(device),
}
}
fn reset(&mut self) {
self.physics.clear_constraints();
self.graph = MyGraph::new();
}
fn read_scene(&mut self, file_idx: usize) {
let dir = std::fs::read_dir("./examples/testgame/scenes");
match dir {
Err(err) => eprintln!("Scenes dir not found: {}", err),
Ok(mut dir) => {
if let Some(Ok(entry)) = dir.nth(file_idx) {
let file = std::fs::File::open(entry.path());
match file {
Ok(file) => {
let scene = Scene::read_from_file(file);
match scene {
Err(err) => eprintln!("Failed to parse file: {}", err),
Ok(scene) => self.scene = scene,
}
}
Err(err) => eprintln!("Failed to open file: {}", err),
}
}
}
}
}
fn instantiate_scene(&mut self) {
self.scene.instantiate(&mut self.graph, &mut self.physics);
}
}
#[derive(Clone, Copy, Debug)]
pub enum MouseMode {
/// Grab objects with the mouse
Grab,
/// Move the camera with the mouse
Camera,
}
/// The recipes in a scene plus some adjustable parameters.
#[derive(Clone, Debug, serde::Deserialize)]
#[serde(default)]
pub struct Scene {
gravity: [f64; 2],
recipes: Vec<Recipe>,
}
impl Default for Scene {
fn default() -> Self {
Self {
gravity: [0.0, -9.81],
recipes: vec![],
}
}
}
impl Scene {
pub fn read_from_file(file: std::fs::File) -> Result<Self, ron::de::Error> {
use serde::Deserialize;
use std::io::Read;
let mut reader = std::io::BufReader::new(file);
let mut bytes = Vec::new();
reader.read_to_end(&mut bytes)?;
let mut deser = ron::de::Deserializer::from_bytes(bytes.as_slice())?;
Scene::deserialize(&mut deser)
}
pub fn instantiate(&self, graph: &mut crate::MyGraph, physics: &mut phys::Physics) {
for recipe in &self.recipes {
recipe.spawn(graph, physics);
}
}
}
/// The entity graph.
pub struct | {
graph: graph::Graph,
l_pose: graph::Layer<m::Pose>,
l_collider: graph::Layer<phys::Collider>,
l_body: graph::Layer<phys::RigidBody>,
l_shape: graph::Layer<gx::Shape>,
l_player: graph::Layer<player::Player>,
l_evt_sink: sf::event::EventSinkLayer<MyGraph>,
}
impl MyGraph {
pub fn new() -> Self {
let mut graph = graph::Graph::new();
let l_pose = graph.create_layer();
let l_collider = graph.create_layer();
let l_body = graph.create_layer();
let l_shape = graph.create_layer();
let l_player = graph.create_layer();
let l_evt_sinks = graph.create_layer();
MyGraph {
graph,
l_pose,
l_collider,
l_body,
l_shape,
l_player,
l_evt_sink: l_evt_sinks,
}
}
}
//
// State updates
//
impl game::GameState for State {
fn tick(&mut self, dt: f64, game: &Game) -> Option<()> {
microprofile::flip();
microprofile::scope!("update", "all");
//
// State-independent stuff
//
// exit on esc
if game.input.is_key_pressed(Key::Escape, None) {
return None;
}
// adjust physics substeps
if game.input.is_key_pressed(Key::NumpadAdd, Some(0)) {
self.physics.substeps += 1;
println!("Substeps: {}", self.physics.substeps);
} else if game.input.is_key_pressed(Key::NumpadSubtract, Some(0))
&& self.physics.substeps > 1
{
self.physics.substeps -= 1;
println!("Substeps: {}", self.physics.substeps);
}
// mouse controls
if game.input.is_key_pressed(Key::V, Some(0)) {
self.mouse_mode = match self.mouse_mode {
MouseMode::Grab => MouseMode::Camera,
MouseMode::Camera => MouseMode::Grab,
};
println!("Mouse mode: {:?}", self.mouse_mode);
}
match self.mouse_mode {
MouseMode::Grab => {
self.mouse_grabber.update(
&game.input,
&self.camera,
game.renderer.window_size().into(),
&mut self.physics,
&self.graph,
);
}
MouseMode::Camera => {
self.camera
.update(&game.input, game.renderer.window_size().into());
if (game.input).is_mouse_button_pressed(MouseButton::Middle, Some(0)) {
self.camera.pose = uv::DSimilarity2::identity();
}
}
}
// reload
for (idx, num_key) in [
Key::Key1,
Key::Key2,
Key::Key3,
Key::Key4,
Key::Key5,
Key::Key6,
Key::Key7,
Key::Key8,
Key::Key9,
]
.iter()
.enumerate()
{
if game.input.is_key_pressed(*num_key, Some(0)) {
self.reset();
self.read_scene(idx);
self.instantiate_scene();
}
}
// reload current scene
if game.input.is_key_pressed(Key::Return, Some(0)) {
self.reset();
self.instantiate_scene();
}
// spawn stuff also when paused
let random_pos = || {
let mut rng = rand::thread_rng();
m::Vec2::new(
distr::Uniform::from(-5.0..5.0).sample(&mut rng),
distr::Uniform::from(1.0..4.0).sample(&mut rng),
)
};
let random_angle =
|| m::Angle::Deg(distr::Uniform::from(0.0..360.0).sample(&mut rand::thread_rng()));
let random_vel = || {
let mut rng = rand::thread_rng();
[
distr::Uniform::from(-5.0..5.0).sample(&mut rng),
distr::Uniform::from(-5.0..5.0).sample(&mut rng),
]
};
let mut rng = rand::thread_rng();
if game.input.is_key_pressed(Key::S, Some(0)) {
Recipe::DynamicBlock(recipes::Block {
pose: m::IsometryBuilder::new()
.with_position(random_pos())
.with_rotation(random_angle()),
width: distr::Uniform::from(0.6..1.0).sample(&mut rng),
height: distr::Uniform::from(0.5..0.8).sample(&mut rng),
})
.spawn(&mut self.graph, &mut self.physics);
}
if game.input.is_key_pressed(Key::T, Some(0)) {
Recipe::Ball(recipes::Ball {
position: random_pos().into(),
radius: distr::Uniform::from(0.1..0.4).sample(&mut rng),
restitution: 1.0,
start_velocity: random_vel(),
})
.spawn(&mut self.graph, &mut self.physics);
}
match (&self.state, game.input.is_key_pressed(Key::Space, Some(0))) {
//
// Playing or stepping manually
//
(StateEnum::Playing, _) | (StateEnum::Paused, true) => {
if game.input.is_key_pressed(Key::P, Some(0)) {
self.state = StateEnum::Paused;
return Some(());
}
{
microprofile::scope!("update", "physics");
let grav = phys::forcefield::Gravity(self.scene.gravity.into());
self.physics.tick(
&self.graph.graph,
&mut self.graph.l_pose,
&mut self.graph.l_body,
&self.graph.l_collider,
&mut self.graph.l_evt_sink,
dt,
&grav,
);
}
{
microprofile::scope!("update", "player");
self.player.tick(&mut self.graph, &game.input);
}
self.graph.l_evt_sink.flush(&self.graph.graph)(&mut self.graph);
Some(())
}
//
// Paused
//
(StateEnum::Paused, false) => {
if game.input.is_key_pressed(Key::P, Some(0)) {
self.state = StateEnum::Playing;
return Some(());
}
Some(())
}
}
}
fn draw(&mut self, renderer: &mut gx::Renderer) {
microprofile::scope!("render", "all");
let mut ctx = renderer.draw_to_window();
ctx.clear(wgpu::Color {
r: 0.1,
g: 0.1,
b: 0.1,
a: 1.0,
});
self.shape_renderer.draw(
&self.graph.l_shape,
&self.graph.l_pose,
&self.graph.graph,
&self.camera,
&mut ctx,
);
ctx.submit();
}
}
| MyGraph | identifier_name |
main.rs | #[macro_use]
extern crate microprofile;
//
use rand::{distributions as distr, distributions::Distribution};
use starframe::{
self as sf,
game::{self, Game},
graph, graphics as gx,
input::{Key, MouseButton},
math::{self as m, uv},
physics as phys,
};
mod mousegrab;
use mousegrab::MouseGrabber;
mod player;
mod recipes;
use recipes::Recipe;
fn main() {
microprofile::init!();
microprofile::set_enable_all_groups!(true);
let game = Game::init(
60,
winit::window::WindowBuilder::new()
.with_title("starframe test")
.with_inner_size(winit::dpi::LogicalSize {
width: 800.0,
height: 600.0,
}),
);
let state = State::init(&game.renderer.device);
game.run(state);
microprofile::shutdown!();
}
//
// Types
//
pub enum StateEnum {
Playing,
Paused,
}
pub struct State {
scene: Scene,
state: StateEnum,
graph: MyGraph,
player: player::PlayerController,
mouse_mode: MouseMode,
mouse_grabber: MouseGrabber,
physics: phys::Physics,
camera: gx::camera::MouseDragCamera,
shape_renderer: gx::ShapeRenderer,
}
impl State {
fn init(device: &wgpu::Device) -> Self {
State {
scene: Scene::default(),
state: StateEnum::Playing,
graph: MyGraph::new(),
player: player::PlayerController::new(),
mouse_mode: MouseMode::Grab,
mouse_grabber: MouseGrabber::new(),
physics: phys::Physics::with_substeps(10),
camera: gx::camera::MouseDragCamera::new(
gx::camera::ScalingStrategy::ConstantDisplayArea {
width: 20.0,
height: 10.0,
},
),
shape_renderer: gx::ShapeRenderer::new(device),
}
}
fn reset(&mut self) {
self.physics.clear_constraints();
self.graph = MyGraph::new();
}
fn read_scene(&mut self, file_idx: usize) {
let dir = std::fs::read_dir("./examples/testgame/scenes");
match dir {
Err(err) => eprintln!("Scenes dir not found: {}", err),
Ok(mut dir) => {
if let Some(Ok(entry)) = dir.nth(file_idx) {
let file = std::fs::File::open(entry.path());
match file {
Ok(file) => {
let scene = Scene::read_from_file(file);
match scene {
Err(err) => eprintln!("Failed to parse file: {}", err),
Ok(scene) => self.scene = scene,
}
}
Err(err) => eprintln!("Failed to open file: {}", err),
}
}
}
}
}
fn instantiate_scene(&mut self) {
self.scene.instantiate(&mut self.graph, &mut self.physics);
}
}
#[derive(Clone, Copy, Debug)]
pub enum MouseMode {
/// Grab objects with the mouse
Grab,
/// Move the camera with the mouse
Camera,
}
/// The recipes in a scene plus some adjustable parameters.
#[derive(Clone, Debug, serde::Deserialize)]
#[serde(default)]
pub struct Scene {
gravity: [f64; 2],
recipes: Vec<Recipe>,
}
impl Default for Scene {
fn default() -> Self {
Self {
gravity: [0.0, -9.81],
recipes: vec![],
}
}
}
impl Scene {
pub fn read_from_file(file: std::fs::File) -> Result<Self, ron::de::Error> {
use serde::Deserialize;
use std::io::Read;
let mut reader = std::io::BufReader::new(file);
let mut bytes = Vec::new();
reader.read_to_end(&mut bytes)?;
let mut deser = ron::de::Deserializer::from_bytes(bytes.as_slice())?;
Scene::deserialize(&mut deser)
}
pub fn instantiate(&self, graph: &mut crate::MyGraph, physics: &mut phys::Physics) {
for recipe in &self.recipes {
recipe.spawn(graph, physics);
}
}
}
/// The entity graph.
pub struct MyGraph {
graph: graph::Graph,
l_pose: graph::Layer<m::Pose>,
l_collider: graph::Layer<phys::Collider>,
l_body: graph::Layer<phys::RigidBody>,
l_shape: graph::Layer<gx::Shape>,
l_player: graph::Layer<player::Player>,
l_evt_sink: sf::event::EventSinkLayer<MyGraph>,
}
impl MyGraph {
pub fn new() -> Self {
let mut graph = graph::Graph::new();
let l_pose = graph.create_layer();
let l_collider = graph.create_layer();
let l_body = graph.create_layer();
let l_shape = graph.create_layer();
let l_player = graph.create_layer();
let l_evt_sinks = graph.create_layer();
MyGraph {
graph,
l_pose,
l_collider,
l_body,
l_shape,
l_player,
l_evt_sink: l_evt_sinks,
}
}
}
//
// State updates
//
impl game::GameState for State {
fn tick(&mut self, dt: f64, game: &Game) -> Option<()> {
microprofile::flip();
microprofile::scope!("update", "all");
//
// State-independent stuff
//
// exit on esc
if game.input.is_key_pressed(Key::Escape, None) {
return None;
}
// adjust physics substeps
if game.input.is_key_pressed(Key::NumpadAdd, Some(0)) {
self.physics.substeps += 1;
println!("Substeps: {}", self.physics.substeps);
} else if game.input.is_key_pressed(Key::NumpadSubtract, Some(0))
&& self.physics.substeps > 1
{
self.physics.substeps -= 1;
println!("Substeps: {}", self.physics.substeps);
}
// mouse controls
if game.input.is_key_pressed(Key::V, Some(0)) {
self.mouse_mode = match self.mouse_mode {
MouseMode::Grab => MouseMode::Camera,
MouseMode::Camera => MouseMode::Grab,
};
println!("Mouse mode: {:?}", self.mouse_mode);
}
match self.mouse_mode {
MouseMode::Grab => {
self.mouse_grabber.update(
&game.input,
&self.camera,
game.renderer.window_size().into(),
&mut self.physics,
&self.graph,
);
}
MouseMode::Camera => {
self.camera
.update(&game.input, game.renderer.window_size().into());
if (game.input).is_mouse_button_pressed(MouseButton::Middle, Some(0)) {
self.camera.pose = uv::DSimilarity2::identity();
}
}
}
// reload
for (idx, num_key) in [
Key::Key1,
Key::Key2,
Key::Key3,
Key::Key4,
Key::Key5,
Key::Key6,
Key::Key7,
Key::Key8,
Key::Key9,
]
.iter()
.enumerate()
{
if game.input.is_key_pressed(*num_key, Some(0)) {
self.reset();
self.read_scene(idx);
self.instantiate_scene();
}
}
// reload current scene
if game.input.is_key_pressed(Key::Return, Some(0)) {
self.reset();
self.instantiate_scene();
}
// spawn stuff also when paused
let random_pos = || {
let mut rng = rand::thread_rng();
m::Vec2::new(
distr::Uniform::from(-5.0..5.0).sample(&mut rng),
distr::Uniform::from(1.0..4.0).sample(&mut rng),
)
};
let random_angle =
|| m::Angle::Deg(distr::Uniform::from(0.0..360.0).sample(&mut rand::thread_rng())); | distr::Uniform::from(-5.0..5.0).sample(&mut rng),
distr::Uniform::from(-5.0..5.0).sample(&mut rng),
]
};
let mut rng = rand::thread_rng();
if game.input.is_key_pressed(Key::S, Some(0)) {
Recipe::DynamicBlock(recipes::Block {
pose: m::IsometryBuilder::new()
.with_position(random_pos())
.with_rotation(random_angle()),
width: distr::Uniform::from(0.6..1.0).sample(&mut rng),
height: distr::Uniform::from(0.5..0.8).sample(&mut rng),
})
.spawn(&mut self.graph, &mut self.physics);
}
if game.input.is_key_pressed(Key::T, Some(0)) {
Recipe::Ball(recipes::Ball {
position: random_pos().into(),
radius: distr::Uniform::from(0.1..0.4).sample(&mut rng),
restitution: 1.0,
start_velocity: random_vel(),
})
.spawn(&mut self.graph, &mut self.physics);
}
match (&self.state, game.input.is_key_pressed(Key::Space, Some(0))) {
//
// Playing or stepping manually
//
(StateEnum::Playing, _) | (StateEnum::Paused, true) => {
if game.input.is_key_pressed(Key::P, Some(0)) {
self.state = StateEnum::Paused;
return Some(());
}
{
microprofile::scope!("update", "physics");
let grav = phys::forcefield::Gravity(self.scene.gravity.into());
self.physics.tick(
&self.graph.graph,
&mut self.graph.l_pose,
&mut self.graph.l_body,
&self.graph.l_collider,
&mut self.graph.l_evt_sink,
dt,
&grav,
);
}
{
microprofile::scope!("update", "player");
self.player.tick(&mut self.graph, &game.input);
}
self.graph.l_evt_sink.flush(&self.graph.graph)(&mut self.graph);
Some(())
}
//
// Paused
//
(StateEnum::Paused, false) => {
if game.input.is_key_pressed(Key::P, Some(0)) {
self.state = StateEnum::Playing;
return Some(());
}
Some(())
}
}
}
fn draw(&mut self, renderer: &mut gx::Renderer) {
microprofile::scope!("render", "all");
let mut ctx = renderer.draw_to_window();
ctx.clear(wgpu::Color {
r: 0.1,
g: 0.1,
b: 0.1,
a: 1.0,
});
self.shape_renderer.draw(
&self.graph.l_shape,
&self.graph.l_pose,
&self.graph.graph,
&self.camera,
&mut ctx,
);
ctx.submit();
}
} | let random_vel = || {
let mut rng = rand::thread_rng();
[ | random_line_split |
main.rs | {
#[clap(long)]
labels: bool,
#[clap(long)]
statement_counts: bool,
#[clap(short, long, default_value = "0")]
skip: u64,
#[clap(short, long)]
threads: Option<usize>,
#[clap(required = true)]
paths: Vec<String>,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum Extra<'a> {
None,
Type(&'a str),
Lang(&'a str),
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum Subject<'a> {
IRI(&'a str),
Blank(&'a str),
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum Object<'a> {
IRI(&'a str),
Blank(&'a str),
Literal(&'a str, Extra<'a>),
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct Statement<'a> {
subject: Subject<'a>,
predicate: &'a str,
object: Object<'a>,
}
pub enum Work {
LINES(u64, Vec<String>),
DONE,
}
pub struct WorkResult {
statement_counts: Option<HashMap<String, u64>>,
}
lazy_static! {
static ref RE: Regex = Regex::new(
r#"(?x)
^
\s*
# subject
(?:
# IRI
(?:<([^>]*)>)
|
# Blank
(?:_:([^\s]+))
)
\s*
# predicate IRI
<([^>]*)>
\s*
# object
(?:
# IRI
(?:<([^>]*)>)
|
# Blank
(?:_:([^\s]+))
|
# literal
(?:
"([^"]*)"
# optional extra
(?:
# language
(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*))
|
# data type
(?:\^\^<([^>]*)>)
)?
)
)
"#
)
.unwrap();
}
pub fn parse<'a>(line: u64, input: &'a str, regex: &Regex) -> Statement<'a> {
let captures = regex
.captures(input)
.unwrap_or_else(|| panic!("Invalid line: {}: {:?}", line, input));
let subject = captures
.get(1)
.map(|object| Subject::IRI(object.as_str()))
.or_else(|| captures.get(2).map(|blank| Subject::Blank(blank.as_str())))
.expect("failed to parse subject");
let predicate = captures.get(3).expect("failed to parse predicate").as_str();
let object = captures
.get(4)
.map(|object| Object::IRI(object.as_str()))
.or_else(|| captures.get(5).map(|blank| Object::Blank(blank.as_str())))
.unwrap_or_else(|| {
let literal = captures.get(6).expect("failed to parse object").as_str();
let extra = captures
.get(7)
.map(|lang| Extra::Lang(lang.as_str()))
.or_else(|| {
captures
.get(8)
.map(|data_type| Extra::Type(data_type.as_str()))
})
.unwrap_or(Extra::None);
Object::Literal(literal, extra)
});
Statement {
subject,
predicate,
object,
}
}
lazy_static_include_str! {
PROPERTIES_DATA => "properties",
IDENTIFIER_PROPERTIES_DATA => "identifier-properties",
LANGUAGES_DATA => "languages",
LABELS_DATA => "labels",
}
lazy_static! {
static ref PROPERTIES: HashSet<&'static str> = line_set(&PROPERTIES_DATA);
}
lazy_static! {
static ref IDENTIFIER_PROPERTIES: HashSet<String> = line_set(&IDENTIFIER_PROPERTIES_DATA)
.iter()
.flat_map(|id| vec![
format!("http://www.wikidata.org/prop/direct/P{}", id),
format!("http://www.wikidata.org/prop/direct-normalized/P{}", id)
])
.collect();
}
lazy_static! {
static ref LANGUAGES: HashSet<&'static str> = line_set(&LANGUAGES_DATA);
}
lazy_static! {
static ref LABELS: HashSet<&'static str> = line_set(&LABELS_DATA);
}
fn line_set(data: &str) -> HashSet<&str> {
data.lines().collect()
}
fn ignored_subject(iri: &str) -> bool {
iri.starts_with("https://www.wikidata.org/wiki/Special:EntityData")
}
fn produce<T: Read>(
running: Arc<AtomicBool>,
skip: u64,
reader: T,
s: &Sender<Work>,
) -> (bool, u64) {
let mut total = 0;
let mut buf_reader = BufReader::new(reader);
let mut lines = Vec::new();
if skip > 0 {
eprintln!("# skipping {}", skip)
}
loop {
if!running.load(Ordering::SeqCst) {
eprintln!("# interrupted after {}", total);
return (false, total);
}
let mut line = String::new();
if buf_reader.read_line(&mut line).unwrap() == 0 {
break;
}
total += 1;
let skipped = total < skip;
if!skipped {
lines.push(line);
if total % BATCH_SIZE == 0 {
s.send(Work::LINES(total, lines)).unwrap();
lines = Vec::new();
}
}
if total % PROGRESS_COUNT == 0 {
let status = if skipped { "skipped" } else { "" };
eprintln!("# {} {}", status, total);
}
}
if!lines.is_empty() {
s.send(Work::LINES(total, lines)).unwrap();
}
(true, total)
}
fn consume(
name: String,
work_receiver: Receiver<Work>,
result_sender: Sender<WorkResult>,
labels: bool,
statement_counts: bool,
) {
let regex = RE.clone();
let lines_path = format!("{}.nt.bz2", name);
let lines_file = File::create(&lines_path)
.unwrap_or_else(|_| panic!("unable to create file: {}", &lines_path));
let mut lines_encoder = BzEncoder::new(BufWriter::new(lines_file), Compression::best());
let mut labels_encoder = if labels {
let labels_path = format!("labels_{}.bz2", name);
let labels_file = File::create(&labels_path)
.unwrap_or_else(|_| panic!("unable to create file: {}", &labels_path));
Some(BzEncoder::new(
BufWriter::new(labels_file),
Compression::best(),
))
} else {
None
};
let mut statement_counter = if statement_counts {
Some(HashMap::new())
} else {
None
};
loop {
match work_receiver.recv().unwrap() {
Work::LINES(number, lines) => {
for line in lines {
handle(
&mut lines_encoder,
labels_encoder.as_mut(),
statement_counter.as_mut(),
number,
line,
®ex,
);
}
lines_encoder.flush().unwrap();
if let Some(labels_encoder) = labels_encoder.as_mut() {
labels_encoder.flush().unwrap()
}
}
Work::DONE => {
eprintln!("# stopping thread {}", name);
lines_encoder.try_finish().unwrap();
if let Some(labels_encoder) = labels_encoder.as_mut() {
labels_encoder.try_finish().unwrap()
}
result_sender
.send(WorkResult {
statement_counts: statement_counter,
})
.unwrap();
return;
}
}
}
}
fn handle<T: Write, U: Write>(
lines_writer: &mut T,
labels_writer: Option<&mut U>,
statement_counter: Option<&mut HashMap<String, u64>>,
number: u64,
line: String,
regex: &Regex,
) -> Option<()> {
let statement = parse(number, &line, regex);
maybe_write_line(lines_writer, &line, statement);
let id = entity(statement.subject)?;
maybe_count_statement(statement_counter, id, statement);
maybe_write_label(labels_writer, id, statement);
None
}
fn maybe_write_line<T: Write>(lines_writer: &mut T, line: &str, statement: Statement) {
if!is_acceptable(statement) {
return;
}
lines_writer.write_all(line.as_bytes()).unwrap();
}
fn maybe_write_label<T: Write>(
labels_writer: Option<&mut T>,
id: &str,
statement: Statement,
) -> Option<()> {
let labels_writer = labels_writer?;
let label = label(statement)?;
labels_writer
.write_fmt(format_args!("{} {}\n", id, label))
.unwrap();
None
}
fn maybe_count_statement(
statement_counter: Option<&mut HashMap<String, u64>>,
id: &str,
statement: Statement,
) -> Option<()> {
let statement_counter = statement_counter?;
direct_property(statement.predicate)?;
*statement_counter.entry(id.to_string()).or_insert(0) += 1;
None
}
fn is_acceptable(statement: Statement) -> bool {
if PROPERTIES.contains(statement.predicate)
|| IDENTIFIER_PROPERTIES.contains(statement.predicate)
{
return false;
}
match statement.subject {
Subject::Blank(_) => return false,
Subject::IRI(iri) if ignored_subject(iri) => return false,
_ => (),
}
match statement.object {
Object::Blank(_) => return false,
Object::Literal(_, Extra::Lang(lang)) if!LANGUAGES.contains(lang) => return false,
// non-Earth geo coordinates are not supported by some triple stores
Object::Literal(
literal,
Extra::Type("http://www.opengis.net/ont/geosparql#wktLiteral"),
) if literal.starts_with('<') => return false,
_ => (),
}
true
}
fn label(statement: Statement) -> Option<String> {
if!LABELS.contains(statement.predicate) {
return None;
}
if let Object::Literal(label, Extra::Lang(lang)) = statement.object {
if!LANGUAGES.contains(lang) {
return None;
}
return Some(unescape(label));
}
None
}
static ENTITY_IRI_PREFIX: &str = "http://www.wikidata.org/entity/Q";
fn entity(subject: Subject) -> Option<&str> {
if let Subject::IRI(iri) = subject {
iri.strip_prefix(ENTITY_IRI_PREFIX)
} else {
None
}
}
static DIRECT_PROPERTY_IRI_PREFIX: &str = "http://www.wikidata.org/prop/direct/";
fn direct_property(predicate: &str) -> Option<&str> {
predicate.strip_prefix(DIRECT_PROPERTY_IRI_PREFIX)
}
pub fn unescape(s: &str) -> String {
let mut chars = s.chars().enumerate();
let mut res = String::with_capacity(s.len());
while let Some((idx, c)) = chars.next() {
if c == '\\' {
match chars.next() {
None => {
panic!("invalid escape at {} in {}", idx, s);
}
Some((idx, c2)) => {
res.push(match c2 {
't' => '\t',
'b' => '\u{08}',
'n' => '\n',
'r' => '\r',
'f' => '\u{0C}',
'\\' => '\\',
'u' => match parse_unicode(&mut chars, 4) {
Ok(c3) => c3,
Err(err) => {
panic!("invalid escape {}{} at {} in {}: {}", c, c2, idx, s, err);
}
},
'U' => match parse_unicode(&mut chars, 8) {
Ok(c3) => c3,
Err(err) => {
panic!("invalid escape {}{} at {} in {}: {}", c, c2, idx, s, err);
}
},
_ => {
panic!("invalid escape {}{} at {} in {}", c, c2, idx, s);
}
});
continue;
}
};
}
res.push(c);
}
res
}
fn parse_unicode<I>(chars: &mut I, count: usize) -> Result<char, String>
where
I: Iterator<Item = (usize, char)>,
{
let unicode_seq: String = chars.take(count).map(|(_, c)| c).collect();
u32::from_str_radix(&unicode_seq, 16)
.map_err(|e| format!("could not parse {} as u32 hex: {}", unicode_seq, e))
.and_then(|u| {
std::char::from_u32(u).ok_or_else(|| format!("could not parse {} as a unicode char", u))
})
}
fn main() {
let opts: Opts = Opts::parse();
let labels = opts.labels;
let statement_counts = opts.statement_counts;
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
ctrlc::set_handler(move || {
if r.load(Ordering::SeqCst) {
exit(1);
}
r.store(false, Ordering::SeqCst);
})
.expect("failed to set Ctrl-C handler");
let start = Instant::now();
let (work_sender, work_receiver) = bounded::<Work>(0);
let (result_sender, result_receiver) = unbounded();
let mut threads = Vec::new();
let thread_count = opts.threads.unwrap_or_else(|| num_cpus::get() * 2);
for id in 1..=thread_count {
let work_receiver = work_receiver.clone();
let result_sender = result_sender.clone();
threads.push(thread::spawn(move || {
consume(
id.to_string(),
work_receiver,
result_sender,
labels,
statement_counts,
)
}));
}
let mut exit_code = 0;
for path in opts.paths {
let file = File::open(&path).expect("can't open file");
let decoder = BzDecoder::new(BufReader::new(file));
eprintln!("# processing {}", path);
let (finished, count) = produce(running.clone(), opts.skip, decoder, &work_sender);
eprintln!("# processed {}: {}", path, count);
if!finished {
exit_code = 1;
break;
}
}
for _ in &threads {
work_sender.send(Work::DONE).unwrap();
}
let mut statement_counter = HashMap::new();
let mut result_count = 0;
for result in result_receiver.iter() {
if let Some(statement_counts) = result.statement_counts {
for (id, count) in statement_counts.iter() {
*statement_counter.entry(id.to_string()).or_insert(0) += count;
}
}
result_count += 1;
if result_count == thread_count {
break;
}
}
if statement_counts {
eprintln!("# entities: {}", statement_counter.len());
let path = "statement_counts.bz2";
let file = File::create(path).unwrap_or_else(|_| panic!("unable to create file: {}", path));
let mut encoder = BzEncoder::new(BufWriter::new(file), Compression::best());
for (id, count) in statement_counter.iter() {
encoder
.write_fmt(format_args!("{} {}\n", id, count))
.unwrap();
}
encoder.try_finish().unwrap();
}
let duration = start.elapsed();
eprintln!("# took {:?}", duration);
exit(exit_code);
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use std::fs::read_to_string;
use std::io::{self, Lines};
use std::path::{Path, PathBuf};
#[test]
fn test_literal_with_type() {
let line = r#"<http://www.wikidata.org/entity/Q1644> <http://www.wikidata.org/prop/direct/P2043> "+1094.26"^^<http://www.w3.org/2001/XMLSchema#decimal>."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q1644"),
predicate: "http://www.wikidata.org/prop/direct/P2043",
object: Object::Literal(
"+1094.26",
Extra::Type("http://www.w3.org/2001/XMLSchema#decimal")
)
}
);
}
#[test]
fn test_literal_with_lang() {
let line = r#"<http://www.wikidata.org/entity/Q177> <http://schema.org/name> "pizza"@en."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q177"),
predicate: "http://schema.org/name",
object: Object::Literal("pizza", Extra::Lang("en"))
}
);
}
#[test]
fn test_literal() {
let line = r#"<http://www.wikidata.org/entity/Q177> <http://www.wikidata.org/prop/direct/P373> "Pizzas"."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q177"),
predicate: "http://www.wikidata.org/prop/direct/P373",
object: Object::Literal("Pizzas", Extra::None)
}
);
}
#[test]
fn test_blank_subject() {
let line = r#"_:foo <bar> <baz>"#;
assert_eq!(
parse(1, line, &RE),
Statement {
| Opts | identifier_name |
|
main.rs | String, u64>>,
}
lazy_static! {
static ref RE: Regex = Regex::new(
r#"(?x)
^
\s*
# subject
(?:
# IRI
(?:<([^>]*)>)
|
# Blank
(?:_:([^\s]+))
)
\s*
# predicate IRI
<([^>]*)>
\s*
# object
(?:
# IRI
(?:<([^>]*)>)
|
# Blank
(?:_:([^\s]+))
|
# literal
(?:
"([^"]*)"
# optional extra
(?:
# language
(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*))
|
# data type
(?:\^\^<([^>]*)>)
)?
)
)
"#
)
.unwrap();
}
pub fn parse<'a>(line: u64, input: &'a str, regex: &Regex) -> Statement<'a> {
let captures = regex
.captures(input)
.unwrap_or_else(|| panic!("Invalid line: {}: {:?}", line, input));
let subject = captures
.get(1)
.map(|object| Subject::IRI(object.as_str()))
.or_else(|| captures.get(2).map(|blank| Subject::Blank(blank.as_str())))
.expect("failed to parse subject");
let predicate = captures.get(3).expect("failed to parse predicate").as_str();
let object = captures
.get(4)
.map(|object| Object::IRI(object.as_str()))
.or_else(|| captures.get(5).map(|blank| Object::Blank(blank.as_str())))
.unwrap_or_else(|| {
let literal = captures.get(6).expect("failed to parse object").as_str();
let extra = captures
.get(7)
.map(|lang| Extra::Lang(lang.as_str()))
.or_else(|| {
captures
.get(8)
.map(|data_type| Extra::Type(data_type.as_str()))
})
.unwrap_or(Extra::None);
Object::Literal(literal, extra)
});
Statement {
subject,
predicate,
object,
}
}
lazy_static_include_str! {
PROPERTIES_DATA => "properties",
IDENTIFIER_PROPERTIES_DATA => "identifier-properties",
LANGUAGES_DATA => "languages",
LABELS_DATA => "labels",
}
lazy_static! {
static ref PROPERTIES: HashSet<&'static str> = line_set(&PROPERTIES_DATA);
}
lazy_static! {
static ref IDENTIFIER_PROPERTIES: HashSet<String> = line_set(&IDENTIFIER_PROPERTIES_DATA)
.iter()
.flat_map(|id| vec![
format!("http://www.wikidata.org/prop/direct/P{}", id),
format!("http://www.wikidata.org/prop/direct-normalized/P{}", id)
])
.collect();
}
lazy_static! {
static ref LANGUAGES: HashSet<&'static str> = line_set(&LANGUAGES_DATA);
}
lazy_static! {
static ref LABELS: HashSet<&'static str> = line_set(&LABELS_DATA);
}
fn line_set(data: &str) -> HashSet<&str> {
data.lines().collect()
}
fn ignored_subject(iri: &str) -> bool {
iri.starts_with("https://www.wikidata.org/wiki/Special:EntityData")
}
fn produce<T: Read>(
running: Arc<AtomicBool>,
skip: u64,
reader: T,
s: &Sender<Work>,
) -> (bool, u64) {
let mut total = 0;
let mut buf_reader = BufReader::new(reader);
let mut lines = Vec::new();
if skip > 0 {
eprintln!("# skipping {}", skip)
}
loop {
if!running.load(Ordering::SeqCst) {
eprintln!("# interrupted after {}", total);
return (false, total);
}
let mut line = String::new();
if buf_reader.read_line(&mut line).unwrap() == 0 {
break;
}
total += 1;
let skipped = total < skip;
if!skipped {
lines.push(line);
if total % BATCH_SIZE == 0 {
s.send(Work::LINES(total, lines)).unwrap();
lines = Vec::new();
}
}
if total % PROGRESS_COUNT == 0 {
let status = if skipped { "skipped" } else { "" };
eprintln!("# {} {}", status, total);
}
}
if!lines.is_empty() {
s.send(Work::LINES(total, lines)).unwrap();
}
(true, total)
}
fn consume(
name: String,
work_receiver: Receiver<Work>,
result_sender: Sender<WorkResult>,
labels: bool,
statement_counts: bool,
) {
let regex = RE.clone();
let lines_path = format!("{}.nt.bz2", name);
let lines_file = File::create(&lines_path)
.unwrap_or_else(|_| panic!("unable to create file: {}", &lines_path));
let mut lines_encoder = BzEncoder::new(BufWriter::new(lines_file), Compression::best());
let mut labels_encoder = if labels {
let labels_path = format!("labels_{}.bz2", name);
let labels_file = File::create(&labels_path)
.unwrap_or_else(|_| panic!("unable to create file: {}", &labels_path));
Some(BzEncoder::new(
BufWriter::new(labels_file),
Compression::best(),
))
} else {
None
};
let mut statement_counter = if statement_counts {
Some(HashMap::new())
} else {
None
};
loop {
match work_receiver.recv().unwrap() {
Work::LINES(number, lines) => {
for line in lines {
handle(
&mut lines_encoder,
labels_encoder.as_mut(),
statement_counter.as_mut(),
number,
line,
®ex,
);
}
lines_encoder.flush().unwrap();
if let Some(labels_encoder) = labels_encoder.as_mut() {
labels_encoder.flush().unwrap()
}
}
Work::DONE => {
eprintln!("# stopping thread {}", name);
lines_encoder.try_finish().unwrap();
if let Some(labels_encoder) = labels_encoder.as_mut() {
labels_encoder.try_finish().unwrap()
}
result_sender
.send(WorkResult {
statement_counts: statement_counter,
})
.unwrap();
return;
}
}
}
}
fn handle<T: Write, U: Write>(
lines_writer: &mut T,
labels_writer: Option<&mut U>,
statement_counter: Option<&mut HashMap<String, u64>>,
number: u64,
line: String,
regex: &Regex,
) -> Option<()> {
let statement = parse(number, &line, regex);
maybe_write_line(lines_writer, &line, statement);
let id = entity(statement.subject)?;
maybe_count_statement(statement_counter, id, statement);
maybe_write_label(labels_writer, id, statement);
None
}
fn maybe_write_line<T: Write>(lines_writer: &mut T, line: &str, statement: Statement) {
if!is_acceptable(statement) {
return;
}
lines_writer.write_all(line.as_bytes()).unwrap();
}
fn maybe_write_label<T: Write>(
labels_writer: Option<&mut T>,
id: &str,
statement: Statement,
) -> Option<()> {
let labels_writer = labels_writer?;
let label = label(statement)?;
labels_writer
.write_fmt(format_args!("{} {}\n", id, label))
.unwrap();
None
}
fn maybe_count_statement(
statement_counter: Option<&mut HashMap<String, u64>>,
id: &str,
statement: Statement,
) -> Option<()> {
let statement_counter = statement_counter?;
direct_property(statement.predicate)?;
*statement_counter.entry(id.to_string()).or_insert(0) += 1;
None
}
fn is_acceptable(statement: Statement) -> bool {
if PROPERTIES.contains(statement.predicate)
|| IDENTIFIER_PROPERTIES.contains(statement.predicate)
{
return false;
}
match statement.subject {
Subject::Blank(_) => return false,
Subject::IRI(iri) if ignored_subject(iri) => return false,
_ => (),
}
match statement.object {
Object::Blank(_) => return false,
Object::Literal(_, Extra::Lang(lang)) if!LANGUAGES.contains(lang) => return false,
// non-Earth geo coordinates are not supported by some triple stores
Object::Literal(
literal,
Extra::Type("http://www.opengis.net/ont/geosparql#wktLiteral"),
) if literal.starts_with('<') => return false,
_ => (),
}
true
}
fn label(statement: Statement) -> Option<String> {
if!LABELS.contains(statement.predicate) {
return None;
}
if let Object::Literal(label, Extra::Lang(lang)) = statement.object {
if!LANGUAGES.contains(lang) {
return None;
}
return Some(unescape(label));
}
None
}
static ENTITY_IRI_PREFIX: &str = "http://www.wikidata.org/entity/Q";
fn entity(subject: Subject) -> Option<&str> {
if let Subject::IRI(iri) = subject {
iri.strip_prefix(ENTITY_IRI_PREFIX)
} else {
None
}
}
static DIRECT_PROPERTY_IRI_PREFIX: &str = "http://www.wikidata.org/prop/direct/";
fn direct_property(predicate: &str) -> Option<&str> {
predicate.strip_prefix(DIRECT_PROPERTY_IRI_PREFIX)
}
pub fn unescape(s: &str) -> String {
let mut chars = s.chars().enumerate();
let mut res = String::with_capacity(s.len());
while let Some((idx, c)) = chars.next() {
if c == '\\' {
match chars.next() {
None => {
panic!("invalid escape at {} in {}", idx, s);
}
Some((idx, c2)) => {
res.push(match c2 {
't' => '\t',
'b' => '\u{08}',
'n' => '\n',
'r' => '\r',
'f' => '\u{0C}',
'\\' => '\\',
'u' => match parse_unicode(&mut chars, 4) {
Ok(c3) => c3,
Err(err) => {
panic!("invalid escape {}{} at {} in {}: {}", c, c2, idx, s, err);
} | }
},
_ => {
panic!("invalid escape {}{} at {} in {}", c, c2, idx, s);
}
});
continue;
}
};
}
res.push(c);
}
res
}
fn parse_unicode<I>(chars: &mut I, count: usize) -> Result<char, String>
where
I: Iterator<Item = (usize, char)>,
{
let unicode_seq: String = chars.take(count).map(|(_, c)| c).collect();
u32::from_str_radix(&unicode_seq, 16)
.map_err(|e| format!("could not parse {} as u32 hex: {}", unicode_seq, e))
.and_then(|u| {
std::char::from_u32(u).ok_or_else(|| format!("could not parse {} as a unicode char", u))
})
}
fn main() {
let opts: Opts = Opts::parse();
let labels = opts.labels;
let statement_counts = opts.statement_counts;
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
ctrlc::set_handler(move || {
if r.load(Ordering::SeqCst) {
exit(1);
}
r.store(false, Ordering::SeqCst);
})
.expect("failed to set Ctrl-C handler");
let start = Instant::now();
let (work_sender, work_receiver) = bounded::<Work>(0);
let (result_sender, result_receiver) = unbounded();
let mut threads = Vec::new();
let thread_count = opts.threads.unwrap_or_else(|| num_cpus::get() * 2);
for id in 1..=thread_count {
let work_receiver = work_receiver.clone();
let result_sender = result_sender.clone();
threads.push(thread::spawn(move || {
consume(
id.to_string(),
work_receiver,
result_sender,
labels,
statement_counts,
)
}));
}
let mut exit_code = 0;
for path in opts.paths {
let file = File::open(&path).expect("can't open file");
let decoder = BzDecoder::new(BufReader::new(file));
eprintln!("# processing {}", path);
let (finished, count) = produce(running.clone(), opts.skip, decoder, &work_sender);
eprintln!("# processed {}: {}", path, count);
if!finished {
exit_code = 1;
break;
}
}
for _ in &threads {
work_sender.send(Work::DONE).unwrap();
}
let mut statement_counter = HashMap::new();
let mut result_count = 0;
for result in result_receiver.iter() {
if let Some(statement_counts) = result.statement_counts {
for (id, count) in statement_counts.iter() {
*statement_counter.entry(id.to_string()).or_insert(0) += count;
}
}
result_count += 1;
if result_count == thread_count {
break;
}
}
if statement_counts {
eprintln!("# entities: {}", statement_counter.len());
let path = "statement_counts.bz2";
let file = File::create(path).unwrap_or_else(|_| panic!("unable to create file: {}", path));
let mut encoder = BzEncoder::new(BufWriter::new(file), Compression::best());
for (id, count) in statement_counter.iter() {
encoder
.write_fmt(format_args!("{} {}\n", id, count))
.unwrap();
}
encoder.try_finish().unwrap();
}
let duration = start.elapsed();
eprintln!("# took {:?}", duration);
exit(exit_code);
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use std::fs::read_to_string;
use std::io::{self, Lines};
use std::path::{Path, PathBuf};
#[test]
fn test_literal_with_type() {
let line = r#"<http://www.wikidata.org/entity/Q1644> <http://www.wikidata.org/prop/direct/P2043> "+1094.26"^^<http://www.w3.org/2001/XMLSchema#decimal>."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q1644"),
predicate: "http://www.wikidata.org/prop/direct/P2043",
object: Object::Literal(
"+1094.26",
Extra::Type("http://www.w3.org/2001/XMLSchema#decimal")
)
}
);
}
#[test]
fn test_literal_with_lang() {
let line = r#"<http://www.wikidata.org/entity/Q177> <http://schema.org/name> "pizza"@en."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q177"),
predicate: "http://schema.org/name",
object: Object::Literal("pizza", Extra::Lang("en"))
}
);
}
#[test]
fn test_literal() {
let line = r#"<http://www.wikidata.org/entity/Q177> <http://www.wikidata.org/prop/direct/P373> "Pizzas"."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q177"),
predicate: "http://www.wikidata.org/prop/direct/P373",
object: Object::Literal("Pizzas", Extra::None)
}
);
}
#[test]
fn test_blank_subject() {
let line = r#"_:foo <bar> <baz>"#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::Blank("foo"),
predicate: "bar",
object: Object::IRI("baz")
}
);
}
#[test]
fn test_blank_object() {
let line = r#"<foo> <bar> _:baz"#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("foo"),
predicate: "bar",
object: Object::Blank("baz")
}
);
}
#[test]
fn test_statement_count() {
let a = format!("{}a", ENTITY_IRI_PREFIX);
let b = format!("{}b", ENTITY_IRI_PREFIX);
let first_predicate = format!("{}first", | },
'U' => match parse_unicode(&mut chars, 8) {
Ok(c3) => c3,
Err(err) => {
panic!("invalid escape {}{} at {} in {}: {}", c, c2, idx, s, err); | random_line_split |
main.rs | static_include_str! {
PROPERTIES_DATA => "properties",
IDENTIFIER_PROPERTIES_DATA => "identifier-properties",
LANGUAGES_DATA => "languages",
LABELS_DATA => "labels",
}
lazy_static! {
static ref PROPERTIES: HashSet<&'static str> = line_set(&PROPERTIES_DATA);
}
lazy_static! {
static ref IDENTIFIER_PROPERTIES: HashSet<String> = line_set(&IDENTIFIER_PROPERTIES_DATA)
.iter()
.flat_map(|id| vec![
format!("http://www.wikidata.org/prop/direct/P{}", id),
format!("http://www.wikidata.org/prop/direct-normalized/P{}", id)
])
.collect();
}
lazy_static! {
static ref LANGUAGES: HashSet<&'static str> = line_set(&LANGUAGES_DATA);
}
lazy_static! {
static ref LABELS: HashSet<&'static str> = line_set(&LABELS_DATA);
}
fn line_set(data: &str) -> HashSet<&str> {
data.lines().collect()
}
fn ignored_subject(iri: &str) -> bool {
iri.starts_with("https://www.wikidata.org/wiki/Special:EntityData")
}
fn produce<T: Read>(
running: Arc<AtomicBool>,
skip: u64,
reader: T,
s: &Sender<Work>,
) -> (bool, u64) {
let mut total = 0;
let mut buf_reader = BufReader::new(reader);
let mut lines = Vec::new();
if skip > 0 {
eprintln!("# skipping {}", skip)
}
loop {
if!running.load(Ordering::SeqCst) {
eprintln!("# interrupted after {}", total);
return (false, total);
}
let mut line = String::new();
if buf_reader.read_line(&mut line).unwrap() == 0 {
break;
}
total += 1;
let skipped = total < skip;
if!skipped {
lines.push(line);
if total % BATCH_SIZE == 0 {
s.send(Work::LINES(total, lines)).unwrap();
lines = Vec::new();
}
}
if total % PROGRESS_COUNT == 0 {
let status = if skipped { "skipped" } else { "" };
eprintln!("# {} {}", status, total);
}
}
if!lines.is_empty() {
s.send(Work::LINES(total, lines)).unwrap();
}
(true, total)
}
fn consume(
name: String,
work_receiver: Receiver<Work>,
result_sender: Sender<WorkResult>,
labels: bool,
statement_counts: bool,
) {
let regex = RE.clone();
let lines_path = format!("{}.nt.bz2", name);
let lines_file = File::create(&lines_path)
.unwrap_or_else(|_| panic!("unable to create file: {}", &lines_path));
let mut lines_encoder = BzEncoder::new(BufWriter::new(lines_file), Compression::best());
let mut labels_encoder = if labels {
let labels_path = format!("labels_{}.bz2", name);
let labels_file = File::create(&labels_path)
.unwrap_or_else(|_| panic!("unable to create file: {}", &labels_path));
Some(BzEncoder::new(
BufWriter::new(labels_file),
Compression::best(),
))
} else {
None
};
let mut statement_counter = if statement_counts {
Some(HashMap::new())
} else {
None
};
loop {
match work_receiver.recv().unwrap() {
Work::LINES(number, lines) => {
for line in lines {
handle(
&mut lines_encoder,
labels_encoder.as_mut(),
statement_counter.as_mut(),
number,
line,
®ex,
);
}
lines_encoder.flush().unwrap();
if let Some(labels_encoder) = labels_encoder.as_mut() {
labels_encoder.flush().unwrap()
}
}
Work::DONE => {
eprintln!("# stopping thread {}", name);
lines_encoder.try_finish().unwrap();
if let Some(labels_encoder) = labels_encoder.as_mut() {
labels_encoder.try_finish().unwrap()
}
result_sender
.send(WorkResult {
statement_counts: statement_counter,
})
.unwrap();
return;
}
}
}
}
fn handle<T: Write, U: Write>(
lines_writer: &mut T,
labels_writer: Option<&mut U>,
statement_counter: Option<&mut HashMap<String, u64>>,
number: u64,
line: String,
regex: &Regex,
) -> Option<()> {
let statement = parse(number, &line, regex);
maybe_write_line(lines_writer, &line, statement);
let id = entity(statement.subject)?;
maybe_count_statement(statement_counter, id, statement);
maybe_write_label(labels_writer, id, statement);
None
}
fn maybe_write_line<T: Write>(lines_writer: &mut T, line: &str, statement: Statement) {
if!is_acceptable(statement) {
return;
}
lines_writer.write_all(line.as_bytes()).unwrap();
}
fn maybe_write_label<T: Write>(
labels_writer: Option<&mut T>,
id: &str,
statement: Statement,
) -> Option<()> {
let labels_writer = labels_writer?;
let label = label(statement)?;
labels_writer
.write_fmt(format_args!("{} {}\n", id, label))
.unwrap();
None
}
fn maybe_count_statement(
statement_counter: Option<&mut HashMap<String, u64>>,
id: &str,
statement: Statement,
) -> Option<()> {
let statement_counter = statement_counter?;
direct_property(statement.predicate)?;
*statement_counter.entry(id.to_string()).or_insert(0) += 1;
None
}
fn is_acceptable(statement: Statement) -> bool {
if PROPERTIES.contains(statement.predicate)
|| IDENTIFIER_PROPERTIES.contains(statement.predicate)
{
return false;
}
match statement.subject {
Subject::Blank(_) => return false,
Subject::IRI(iri) if ignored_subject(iri) => return false,
_ => (),
}
match statement.object {
Object::Blank(_) => return false,
Object::Literal(_, Extra::Lang(lang)) if!LANGUAGES.contains(lang) => return false,
// non-Earth geo coordinates are not supported by some triple stores
Object::Literal(
literal,
Extra::Type("http://www.opengis.net/ont/geosparql#wktLiteral"),
) if literal.starts_with('<') => return false,
_ => (),
}
true
}
fn label(statement: Statement) -> Option<String> {
if!LABELS.contains(statement.predicate) {
return None;
}
if let Object::Literal(label, Extra::Lang(lang)) = statement.object {
if!LANGUAGES.contains(lang) {
return None;
}
return Some(unescape(label));
}
None
}
static ENTITY_IRI_PREFIX: &str = "http://www.wikidata.org/entity/Q";
fn entity(subject: Subject) -> Option<&str> {
if let Subject::IRI(iri) = subject {
iri.strip_prefix(ENTITY_IRI_PREFIX)
} else {
None
}
}
static DIRECT_PROPERTY_IRI_PREFIX: &str = "http://www.wikidata.org/prop/direct/";
fn direct_property(predicate: &str) -> Option<&str> {
predicate.strip_prefix(DIRECT_PROPERTY_IRI_PREFIX)
}
pub fn unescape(s: &str) -> String {
let mut chars = s.chars().enumerate();
let mut res = String::with_capacity(s.len());
while let Some((idx, c)) = chars.next() {
if c == '\\' {
match chars.next() {
None => {
panic!("invalid escape at {} in {}", idx, s);
}
Some((idx, c2)) => {
res.push(match c2 {
't' => '\t',
'b' => '\u{08}',
'n' => '\n',
'r' => '\r',
'f' => '\u{0C}',
'\\' => '\\',
'u' => match parse_unicode(&mut chars, 4) {
Ok(c3) => c3,
Err(err) => {
panic!("invalid escape {}{} at {} in {}: {}", c, c2, idx, s, err);
}
},
'U' => match parse_unicode(&mut chars, 8) {
Ok(c3) => c3,
Err(err) => {
panic!("invalid escape {}{} at {} in {}: {}", c, c2, idx, s, err);
}
},
_ => {
panic!("invalid escape {}{} at {} in {}", c, c2, idx, s);
}
});
continue;
}
};
}
res.push(c);
}
res
}
fn parse_unicode<I>(chars: &mut I, count: usize) -> Result<char, String>
where
I: Iterator<Item = (usize, char)>,
{
let unicode_seq: String = chars.take(count).map(|(_, c)| c).collect();
u32::from_str_radix(&unicode_seq, 16)
.map_err(|e| format!("could not parse {} as u32 hex: {}", unicode_seq, e))
.and_then(|u| {
std::char::from_u32(u).ok_or_else(|| format!("could not parse {} as a unicode char", u))
})
}
fn main() {
let opts: Opts = Opts::parse();
let labels = opts.labels;
let statement_counts = opts.statement_counts;
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
ctrlc::set_handler(move || {
if r.load(Ordering::SeqCst) {
exit(1);
}
r.store(false, Ordering::SeqCst);
})
.expect("failed to set Ctrl-C handler");
let start = Instant::now();
let (work_sender, work_receiver) = bounded::<Work>(0);
let (result_sender, result_receiver) = unbounded();
let mut threads = Vec::new();
let thread_count = opts.threads.unwrap_or_else(|| num_cpus::get() * 2);
for id in 1..=thread_count {
let work_receiver = work_receiver.clone();
let result_sender = result_sender.clone();
threads.push(thread::spawn(move || {
consume(
id.to_string(),
work_receiver,
result_sender,
labels,
statement_counts,
)
}));
}
let mut exit_code = 0;
for path in opts.paths {
let file = File::open(&path).expect("can't open file");
let decoder = BzDecoder::new(BufReader::new(file));
eprintln!("# processing {}", path);
let (finished, count) = produce(running.clone(), opts.skip, decoder, &work_sender);
eprintln!("# processed {}: {}", path, count);
if!finished {
exit_code = 1;
break;
}
}
for _ in &threads {
work_sender.send(Work::DONE).unwrap();
}
let mut statement_counter = HashMap::new();
let mut result_count = 0;
for result in result_receiver.iter() {
if let Some(statement_counts) = result.statement_counts {
for (id, count) in statement_counts.iter() {
*statement_counter.entry(id.to_string()).or_insert(0) += count;
}
}
result_count += 1;
if result_count == thread_count {
break;
}
}
if statement_counts {
eprintln!("# entities: {}", statement_counter.len());
let path = "statement_counts.bz2";
let file = File::create(path).unwrap_or_else(|_| panic!("unable to create file: {}", path));
let mut encoder = BzEncoder::new(BufWriter::new(file), Compression::best());
for (id, count) in statement_counter.iter() {
encoder
.write_fmt(format_args!("{} {}\n", id, count))
.unwrap();
}
encoder.try_finish().unwrap();
}
let duration = start.elapsed();
eprintln!("# took {:?}", duration);
exit(exit_code);
}
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
use std::fs::read_to_string;
use std::io::{self, Lines};
use std::path::{Path, PathBuf};
#[test]
fn test_literal_with_type() {
let line = r#"<http://www.wikidata.org/entity/Q1644> <http://www.wikidata.org/prop/direct/P2043> "+1094.26"^^<http://www.w3.org/2001/XMLSchema#decimal>."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q1644"),
predicate: "http://www.wikidata.org/prop/direct/P2043",
object: Object::Literal(
"+1094.26",
Extra::Type("http://www.w3.org/2001/XMLSchema#decimal")
)
}
);
}
#[test]
fn test_literal_with_lang() {
let line = r#"<http://www.wikidata.org/entity/Q177> <http://schema.org/name> "pizza"@en."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q177"),
predicate: "http://schema.org/name",
object: Object::Literal("pizza", Extra::Lang("en"))
}
);
}
#[test]
fn test_literal() {
let line = r#"<http://www.wikidata.org/entity/Q177> <http://www.wikidata.org/prop/direct/P373> "Pizzas"."#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("http://www.wikidata.org/entity/Q177"),
predicate: "http://www.wikidata.org/prop/direct/P373",
object: Object::Literal("Pizzas", Extra::None)
}
);
}
#[test]
fn test_blank_subject() {
let line = r#"_:foo <bar> <baz>"#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::Blank("foo"),
predicate: "bar",
object: Object::IRI("baz")
}
);
}
#[test]
fn test_blank_object() {
let line = r#"<foo> <bar> _:baz"#;
assert_eq!(
parse(1, line, &RE),
Statement {
subject: Subject::IRI("foo"),
predicate: "bar",
object: Object::Blank("baz")
}
);
}
#[test]
fn test_statement_count() {
let a = format!("{}a", ENTITY_IRI_PREFIX);
let b = format!("{}b", ENTITY_IRI_PREFIX);
let first_predicate = format!("{}first", DIRECT_PROPERTY_IRI_PREFIX);
let second_predicate = "second";
let third_predicate = format!("{}third", DIRECT_PROPERTY_IRI_PREFIX);
let first = Statement {
subject: Subject::IRI(a.as_str()),
predicate: first_predicate.as_str(),
object: Object::IRI(""),
};
let second = Statement {
subject: Subject::IRI(b.as_str()),
predicate: second_predicate,
object: Object::IRI(""),
};
let third = Statement {
subject: Subject::IRI(a.as_str()),
predicate: third_predicate.as_str(),
object: Object::IRI(""),
};
let mut counter = HashMap::new();
maybe_count_statement(Some(&mut counter), "a", first);
maybe_count_statement(Some(&mut counter), "b", second);
maybe_count_statement(Some(&mut counter), "a", third);
assert_eq!(counter.len(), 1);
assert_eq!(counter.get("a"), Some(&2));
assert_eq!(counter.get("b"), None);
}
#[test]
fn test_geo_literals() {
assert!(is_acceptable(parse(
1,
r#"<foo> <bar> "Point(4.6681 50.6411)"^^<http://www.opengis.net/ont/geosparql#wktLiteral>."#,
&RE,
)));
assert!(!is_acceptable(parse(
1,
r#"<foo> <bar> "<http://www.wikidata.org/entity/Q405> Point(-141.6 42.6)"^^<http://www.opengis.net/ont/geosparql#wktLiteral>."#,
&RE,
)));
}
fn read_lines<P>(filename: P) -> io::Result<Lines<BufReader<File>>>
where
P: AsRef<Path>,
{
let file = File::open(filename)?;
Ok(BufReader::new(file).lines())
}
#[test]
fn test_full() -> Result<(), ()> | {
let dir = env!("CARGO_MANIFEST_DIR");
let mut in_path = PathBuf::from(dir);
in_path.push("test.in.rdf");
let in_path = in_path.as_os_str().to_str().unwrap();
let mut out_path = PathBuf::from(dir);
out_path.push("test.out.rdf");
let out_path = out_path.as_os_str().to_str().unwrap();
let mut lines_writer = Vec::new();
let mut labels_writer = Vec::new();
for (line, number) in read_lines(in_path).unwrap().zip(1u64..) {
let mut line = line.unwrap();
line.push('\n');
handle(
&mut lines_writer,
Some(&mut labels_writer), | identifier_body |
|
writer.rs |
call_locations: IndexSet<raw::SourceLocation>,
/// A map from code ranges to the [`raw::SourceLocation`]s they correspond to.
///
/// Only the starting address of a range is saved, the end address is given implicitly
/// by the start address of the next range.
ranges: BTreeMap<u32, raw::SourceLocation>,
/// This is highest addr that we know is outside of a valid function.
/// Functions have an explicit end, while Symbols implicitly extend to infinity.
/// In case the highest addr belongs to a Symbol, this will be `None` and the SymCache
/// also extends to infinite, otherwise this is the end of the highest function.
last_addr: Option<u32>,
}
impl<'a> SymCacheConverter<'a> {
/// Creates a new Converter.
pub fn new() -> Self {
Self::default()
}
/// Adds a new [`transform::Transformer`] to this [`SymCacheConverter`].
///
/// Every [`transform::Function`] and [`transform::SourceLocation`] will be passed through
/// this transformer before it is written to the SymCache.
pub fn add_transformer<T>(&mut self, t: T)
where
T: transform::Transformer + 'a,
{
self.transformers.0.push(Box::new(t));
}
/// Sets the CPU architecture of this SymCache.
pub fn | (&mut self, arch: Arch) {
self.arch = arch;
}
/// Sets the debug identifier of this SymCache.
pub fn set_debug_id(&mut self, debug_id: DebugId) {
self.debug_id = debug_id;
}
// Methods processing symbolic-debuginfo [`ObjectLike`] below:
// Feel free to move these to a separate file.
/// This processes the given [`ObjectLike`] object, collecting all its functions and line
/// information into the converter.
#[tracing::instrument(skip_all, fields(object.debug_id = %object.debug_id().breakpad()))]
pub fn process_object<'d, 'o, O>(&mut self, object: &'o O) -> Result<(), Error>
where
O: ObjectLike<'d, 'o>,
O::Error: std::error::Error + Send + Sync +'static,
{
let session = object
.debug_session()
.map_err(|e| Error::new(ErrorKind::BadDebugFile, e))?;
self.set_arch(object.arch());
self.set_debug_id(object.debug_id());
self.is_windows_object = matches!(object.file_format(), FileFormat::Pe | FileFormat::Pdb);
for function in session.functions() {
let function = function.map_err(|e| Error::new(ErrorKind::BadDebugFile, e))?;
self.process_symbolic_function(&function);
}
for symbol in object.symbols() {
self.process_symbolic_symbol(&symbol);
}
self.is_windows_object = false;
Ok(())
}
/// Processes an individual [`Function`], adding its line information to the converter.
pub fn process_symbolic_function(&mut self, function: &Function<'_>) {
self.process_symbolic_function_recursive(function, &[(0x0, u32::MAX)]);
}
/// Processes an individual [`Function`], adding its line information to the converter.
///
/// `call_locations` is a non-empty sorted list of `(address, call_location index)` pairs.
fn process_symbolic_function_recursive(
&mut self,
function: &Function<'_>,
call_locations: &[(u32, u32)],
) {
let string_table = &mut self.string_table;
// skip over empty functions or functions whose address is too large to fit in a u32
if function.size == 0 || function.address > u32::MAX as u64 {
return;
}
let comp_dir = std::str::from_utf8(function.compilation_dir).ok();
let entry_pc = if function.inline {
u32::MAX
} else {
function.address as u32
};
let function_idx = {
let language = function.name.language();
let mut function = transform::Function {
name: function.name.as_str().into(),
comp_dir: comp_dir.map(Into::into),
};
for transformer in &mut self.transformers.0 {
function = transformer.transform_function(function);
}
let function_name = if self.is_windows_object {
undecorate_win_symbol(&function.name)
} else {
&function.name
};
let name_offset = string_table.insert(function_name) as u32;
let lang = language as u32;
let (fun_idx, _) = self.functions.insert_full(raw::Function {
name_offset,
_comp_dir_offset: u32::MAX,
entry_pc,
lang,
});
fun_idx as u32
};
// We can divide the instructions in a function into two buckets:
// (1) Instructions which are part of an inlined function call, and
// (2) instructions which are *not* part of an inlined function call.
//
// Our incoming line records cover both (1) and (2) types of instructions.
//
// Let's call the address ranges of these instructions (1) inlinee ranges and (2) self ranges.
//
// We use the following strategy: For each function, only insert that function's "self ranges"
// into `self.ranges`. Then recurse into the function's inlinees. Those will insert their
// own "self ranges". Once the entire tree has been traversed, `self.ranges` will contain
// entries from all levels.
//
// In order to compute this function's "self ranges", we first gather and sort its
// "inlinee ranges". Later, when we iterate over this function's lines, we will compute the
// "self ranges" from the gaps between the "inlinee ranges".
let mut inlinee_ranges = Vec::new();
for inlinee in &function.inlinees {
for line in &inlinee.lines {
let start = line.address as u32;
let end = (line.address + line.size.unwrap_or(1)) as u32;
inlinee_ranges.push(start..end);
}
}
inlinee_ranges.sort_unstable_by_key(|range| range.start);
// Walk three iterators. All of these are already sorted by address.
let mut line_iter = function.lines.iter();
let mut call_location_iter = call_locations.iter();
let mut inline_iter = inlinee_ranges.into_iter();
// call_locations is non-empty, so the first element always exists.
let mut current_call_location = call_location_iter.next().unwrap();
let mut next_call_location = call_location_iter.next();
let mut next_line = line_iter.next();
let mut next_inline = inline_iter.next();
// This will be the list we pass to our inlinees as the call_locations argument.
// This list is ordered by address by construction.
let mut callee_call_locations = Vec::new();
// Iterate over the line records.
while let Some(line) = next_line.take() {
let line_range_start = line.address as u32;
let line_range_end = (line.address + line.size.unwrap_or(1)) as u32;
// Find the call location for this line.
while next_call_location.is_some() && next_call_location.unwrap().0 <= line_range_start
{
current_call_location = next_call_location.unwrap();
next_call_location = call_location_iter.next();
}
let inlined_into_idx = current_call_location.1;
let mut location = transform::SourceLocation {
file: transform::File {
name: line.file.name_str(),
directory: Some(line.file.dir_str()),
comp_dir: comp_dir.map(Into::into),
},
line: line.line as u32,
};
for transformer in &mut self.transformers.0 {
location = transformer.transform_source_location(location);
}
let name_offset = string_table.insert(&location.file.name) as u32;
let directory_offset = location
.file
.directory
.map_or(u32::MAX, |d| string_table.insert(&d) as u32);
let comp_dir_offset = location
.file
.comp_dir
.map_or(u32::MAX, |cd| string_table.insert(&cd) as u32);
let (file_idx, _) = self.files.insert_full(raw::File {
name_offset,
directory_offset,
comp_dir_offset,
});
let source_location = raw::SourceLocation {
file_idx: file_idx as u32,
line: location.line,
function_idx,
inlined_into_idx,
};
// The current line can be a "self line", or a "call line", or even a mixture.
//
// Examples:
//
// a) Just self line:
// Line: |==============|
// Inlinee ranges: (none)
//
// Effect: insert_range
//
// b) Just call line:
// Line: |==============|
// Inlinee ranges: |--------------|
//
// Effect: make_call_location
//
// c) Just call line, for multiple inlined calls:
// Line: |==========================|
// Inlinee ranges: |----------||--------------|
//
// Effect: make_call_location, make_call_location
//
// d) Call line and trailing self line:
// Line: |==================|
// Inlinee ranges: |-----------|
//
// Effect: make_call_location, insert_range
//
// e) Leading self line and also call line:
// Line: |==================|
// Inlinee ranges: |-----------|
//
// Effect: insert_range, make_call_location
//
// f) Interleaving
// Line: |======================================|
// Inlinee ranges: |-----------| |-------|
//
// Effect: insert_range, make_call_location, insert_range, make_call_location, insert_range
//
// g) Bad debug info
// Line: |=======|
// Inlinee ranges: |-------------|
//
// Effect: make_call_location
let mut current_address = line_range_start;
while current_address < line_range_end {
// Emit our source location at current_address if current_address is not covered by an inlinee.
if next_inline.is_none() || next_inline.as_ref().unwrap().start > current_address {
// "insert_range"
self.ranges.insert(current_address, source_location.clone());
}
// If there is an inlinee range covered by this line record, turn this line into that
// call's "call line". Make a `call_location_idx` for it and store it in `callee_call_locations`.
if next_inline.is_some() && next_inline.as_ref().unwrap().start < line_range_end {
let inline_range = next_inline.take().unwrap();
// "make_call_location"
let (call_location_idx, _) =
self.call_locations.insert_full(source_location.clone());
callee_call_locations.push((inline_range.start, call_location_idx as u32));
// Advance current_address to the end of this inlinee range.
current_address = inline_range.end;
next_inline = inline_iter.next();
} else {
// No further inlinee ranges are overlapping with this line record. Advance to the
// end of the line record.
current_address = line_range_end;
}
}
// Advance the line iterator.
next_line = line_iter.next();
// Skip any lines that start before current_address.
// Such lines can exist if the debug information is faulty, or if the compiler created
// multiple identical small "call line" records instead of one combined record
// covering the entire inlinee range. We can't have different "call lines" for a single
// inlinee range anyway, so it's fine to skip these.
while next_line.is_some()
&& (next_line.as_ref().unwrap().address as u32) < current_address
{
next_line = line_iter.next();
}
}
if!function.inline {
// add the bare minimum of information for the function if there isn't any.
self.ranges.entry(entry_pc).or_insert(raw::SourceLocation {
file_idx: u32::MAX,
line: 0,
function_idx,
inlined_into_idx: u32::MAX,
});
}
// We've processed all address ranges which are *not* covered by inlinees.
// Now it's time to recurse.
// Process our inlinees.
if!callee_call_locations.is_empty() {
for inlinee in &function.inlinees {
self.process_symbolic_function_recursive(inlinee, &callee_call_locations);
}
}
let function_end = function.end_address() as u32;
let last_addr = self.last_addr.get_or_insert(0);
if function_end > *last_addr {
*last_addr = function_end;
}
}
/// Processes an individual [`Symbol`].
pub fn process_symbolic_symbol(&mut self, symbol: &Symbol<'_>) {
let name_idx = {
let mut function = transform::Function {
name: match symbol.name {
Some(ref name) => name.clone(),
None => return,
},
comp_dir: None,
};
for transformer in &mut self.transformers.0 {
function = transformer.transform_function(function);
}
let function_name = if self.is_windows_object {
undecorate_win_symbol(&function.name)
} else {
&function.name
};
self.string_table.insert(function_name) as u32
};
match self.ranges.entry(symbol.address as u32) {
btree_map::Entry::Vacant(entry) => {
let function = raw::Function {
name_offset: name_idx,
_comp_dir_offset: u32::MAX,
entry_pc: symbol.address as u32,
lang: u32::MAX,
};
let function_idx = self.functions.insert_full(function).0 as u32;
entry.insert(raw::SourceLocation {
file_idx: u32::MAX,
line: 0,
function_idx,
inlined_into_idx: u32::MAX,
});
}
btree_map::Entry::Occupied(entry) => {
// ASSUMPTION:
// the `functions` iterator has already filled in this addr via debug session.
// we could trace the caller hierarchy up to the root, and assert that it is
// indeed the same function, and maybe update its `entry_pc`, but we don’t do
// that for now.
let _function_idx = entry.get().function_idx as usize;
}
}
let last_addr = self.last_addr.get_or_insert(0);
if symbol.address as u32 >= *last_addr {
self.last_addr = None;
}
}
// Methods for serializing to a [`Write`] below:
// Feel free to move these to a separate file.
/// Serialize the converted data.
///
/// This writes the SymCache binary format into the given [`Write`].
pub fn serialize<W: Write>(mut self, writer: &mut W) -> std::io::Result<()> {
let mut writer = Writer::new(writer);
// Insert a trailing sentinel source location in case we have a definite end addr
if let Some(last_addr) = self.last_addr {
// TODO: to be extra safe, we might check that `last_addr` is indeed larger than
// the largest range at some point.
match self.ranges.entry(last_addr) {
btree_map::Entry::Vacant(entry) => {
entry.insert(raw::NO_SOURCE_LOCATION);
}
btree_map::Entry::Occupied(_entry) => {
// BUG:
// the last addr should not map to an already defined range
}
}
}
let num_files = self.files.len() as u32;
let num_functions = self.functions.len() as u32;
let num_source_locations = (self.call_locations.len() + self.ranges.len()) as u32;
let num_ranges = self.ranges.len() as u32;
let string_bytes = self.string_table.into_bytes();
let header = raw::Header {
magic: raw::SYMCACHE_MAGIC,
version: crate::SYMCACHE_VERSION,
debug_id: self.debug_id,
arch: self.arch,
num_files,
num_functions,
num_source_locations,
num_ranges,
string_bytes: string_bytes.len() as u32,
_reserved: [0; 16],
};
writer.write_all(header.as_bytes())?;
writer.align_to(8)?;
for f in | set_arch | identifier_name |
writer.rs |
call_locations: IndexSet<raw::SourceLocation>,
/// A map from code ranges to the [`raw::SourceLocation`]s they correspond to.
///
/// Only the starting address of a range is saved, the end address is given implicitly
/// by the start address of the next range.
ranges: BTreeMap<u32, raw::SourceLocation>,
/// This is highest addr that we know is outside of a valid function.
/// Functions have an explicit end, while Symbols implicitly extend to infinity.
/// In case the highest addr belongs to a Symbol, this will be `None` and the SymCache
/// also extends to infinite, otherwise this is the end of the highest function.
last_addr: Option<u32>,
}
impl<'a> SymCacheConverter<'a> {
/// Creates a new Converter.
pub fn new() -> Self {
Self::default()
}
/// Adds a new [`transform::Transformer`] to this [`SymCacheConverter`].
///
/// Every [`transform::Function`] and [`transform::SourceLocation`] will be passed through
/// this transformer before it is written to the SymCache.
pub fn add_transformer<T>(&mut self, t: T)
where
T: transform::Transformer + 'a,
{
self.transformers.0.push(Box::new(t));
}
/// Sets the CPU architecture of this SymCache.
pub fn set_arch(&mut self, arch: Arch) {
self.arch = arch;
}
/// Sets the debug identifier of this SymCache.
pub fn set_debug_id(&mut self, debug_id: DebugId) {
self.debug_id = debug_id;
}
// Methods processing symbolic-debuginfo [`ObjectLike`] below:
// Feel free to move these to a separate file.
/// This processes the given [`ObjectLike`] object, collecting all its functions and line
/// information into the converter.
#[tracing::instrument(skip_all, fields(object.debug_id = %object.debug_id().breakpad()))]
pub fn process_object<'d, 'o, O>(&mut self, object: &'o O) -> Result<(), Error>
where
O: ObjectLike<'d, 'o>,
O::Error: std::error::Error + Send + Sync +'static,
{
let session = object
.debug_session()
.map_err(|e| Error::new(ErrorKind::BadDebugFile, e))?;
self.set_arch(object.arch());
self.set_debug_id(object.debug_id());
self.is_windows_object = matches!(object.file_format(), FileFormat::Pe | FileFormat::Pdb);
for function in session.functions() {
let function = function.map_err(|e| Error::new(ErrorKind::BadDebugFile, e))?;
self.process_symbolic_function(&function);
}
for symbol in object.symbols() {
self.process_symbolic_symbol(&symbol);
}
self.is_windows_object = false;
Ok(())
}
/// Processes an individual [`Function`], adding its line information to the converter.
pub fn process_symbolic_function(&mut self, function: &Function<'_>) {
self.process_symbolic_function_recursive(function, &[(0x0, u32::MAX)]);
}
/// Processes an individual [`Function`], adding its line information to the converter.
///
/// `call_locations` is a non-empty sorted list of `(address, call_location index)` pairs.
fn process_symbolic_function_recursive(
&mut self,
function: &Function<'_>,
call_locations: &[(u32, u32)],
) {
let string_table = &mut self.string_table;
// skip over empty functions or functions whose address is too large to fit in a u32
if function.size == 0 || function.address > u32::MAX as u64 {
return;
}
let comp_dir = std::str::from_utf8(function.compilation_dir).ok();
let entry_pc = if function.inline {
u32::MAX
} else {
function.address as u32
};
let function_idx = {
let language = function.name.language();
let mut function = transform::Function {
name: function.name.as_str().into(),
comp_dir: comp_dir.map(Into::into),
};
for transformer in &mut self.transformers.0 {
function = transformer.transform_function(function);
}
let function_name = if self.is_windows_object {
undecorate_win_symbol(&function.name)
} else {
&function.name
};
let name_offset = string_table.insert(function_name) as u32;
let lang = language as u32;
let (fun_idx, _) = self.functions.insert_full(raw::Function {
name_offset,
_comp_dir_offset: u32::MAX,
entry_pc,
lang,
});
fun_idx as u32
};
// We can divide the instructions in a function into two buckets:
// (1) Instructions which are part of an inlined function call, and
// (2) instructions which are *not* part of an inlined function call.
//
// Our incoming line records cover both (1) and (2) types of instructions.
//
// Let's call the address ranges of these instructions (1) inlinee ranges and (2) self ranges.
//
// We use the following strategy: For each function, only insert that function's "self ranges"
// into `self.ranges`. Then recurse into the function's inlinees. Those will insert their
// own "self ranges". Once the entire tree has been traversed, `self.ranges` will contain
// entries from all levels.
//
// In order to compute this function's "self ranges", we first gather and sort its
// "inlinee ranges". Later, when we iterate over this function's lines, we will compute the
// "self ranges" from the gaps between the "inlinee ranges".
let mut inlinee_ranges = Vec::new();
for inlinee in &function.inlinees {
for line in &inlinee.lines {
let start = line.address as u32;
let end = (line.address + line.size.unwrap_or(1)) as u32;
inlinee_ranges.push(start..end);
}
}
inlinee_ranges.sort_unstable_by_key(|range| range.start);
// Walk three iterators. All of these are already sorted by address.
let mut line_iter = function.lines.iter();
let mut call_location_iter = call_locations.iter();
let mut inline_iter = inlinee_ranges.into_iter();
// call_locations is non-empty, so the first element always exists.
let mut current_call_location = call_location_iter.next().unwrap();
let mut next_call_location = call_location_iter.next();
let mut next_line = line_iter.next();
let mut next_inline = inline_iter.next();
// This will be the list we pass to our inlinees as the call_locations argument.
// This list is ordered by address by construction.
let mut callee_call_locations = Vec::new();
// Iterate over the line records.
while let Some(line) = next_line.take() {
let line_range_start = line.address as u32;
let line_range_end = (line.address + line.size.unwrap_or(1)) as u32;
// Find the call location for this line.
while next_call_location.is_some() && next_call_location.unwrap().0 <= line_range_start
{
current_call_location = next_call_location.unwrap();
next_call_location = call_location_iter.next();
}
let inlined_into_idx = current_call_location.1;
let mut location = transform::SourceLocation {
file: transform::File {
name: line.file.name_str(),
directory: Some(line.file.dir_str()),
comp_dir: comp_dir.map(Into::into),
},
line: line.line as u32,
};
for transformer in &mut self.transformers.0 {
location = transformer.transform_source_location(location);
}
let name_offset = string_table.insert(&location.file.name) as u32;
let directory_offset = location
.file
.directory
.map_or(u32::MAX, |d| string_table.insert(&d) as u32);
let comp_dir_offset = location
.file
.comp_dir
.map_or(u32::MAX, |cd| string_table.insert(&cd) as u32);
let (file_idx, _) = self.files.insert_full(raw::File {
name_offset,
directory_offset,
comp_dir_offset,
});
let source_location = raw::SourceLocation {
file_idx: file_idx as u32,
line: location.line,
function_idx,
inlined_into_idx,
};
// The current line can be a "self line", or a "call line", or even a mixture.
//
// Examples:
//
// a) Just self line:
// Line: |==============|
// Inlinee ranges: (none)
//
// Effect: insert_range
//
// b) Just call line:
// Line: |==============|
// Inlinee ranges: |--------------|
//
// Effect: make_call_location
//
// c) Just call line, for multiple inlined calls:
// Line: |==========================|
// Inlinee ranges: |----------||--------------|
//
// Effect: make_call_location, make_call_location
//
// d) Call line and trailing self line:
// Line: |==================|
// Inlinee ranges: |-----------|
//
// Effect: make_call_location, insert_range
//
// e) Leading self line and also call line:
// Line: |==================|
// Inlinee ranges: |-----------|
//
// Effect: insert_range, make_call_location
//
// f) Interleaving
// Line: |======================================|
// Inlinee ranges: |-----------| |-------|
//
// Effect: insert_range, make_call_location, insert_range, make_call_location, insert_range
//
// g) Bad debug info
// Line: |=======|
// Inlinee ranges: |-------------|
//
// Effect: make_call_location
let mut current_address = line_range_start;
while current_address < line_range_end {
// Emit our source location at current_address if current_address is not covered by an inlinee.
if next_inline.is_none() || next_inline.as_ref().unwrap().start > current_address {
// "insert_range"
self.ranges.insert(current_address, source_location.clone());
}
// If there is an inlinee range covered by this line record, turn this line into that
// call's "call line". Make a `call_location_idx` for it and store it in `callee_call_locations`.
if next_inline.is_some() && next_inline.as_ref().unwrap().start < line_range_end {
let inline_range = next_inline.take().unwrap();
// "make_call_location"
let (call_location_idx, _) =
self.call_locations.insert_full(source_location.clone());
callee_call_locations.push((inline_range.start, call_location_idx as u32));
// Advance current_address to the end of this inlinee range.
current_address = inline_range.end;
next_inline = inline_iter.next();
} else {
// No further inlinee ranges are overlapping with this line record. Advance to the
// end of the line record.
current_address = line_range_end;
}
}
// Advance the line iterator.
next_line = line_iter.next();
// Skip any lines that start before current_address.
// Such lines can exist if the debug information is faulty, or if the compiler created
// multiple identical small "call line" records instead of one combined record
// covering the entire inlinee range. We can't have different "call lines" for a single
// inlinee range anyway, so it's fine to skip these.
while next_line.is_some()
&& (next_line.as_ref().unwrap().address as u32) < current_address
{
next_line = line_iter.next();
}
}
if!function.inline {
// add the bare minimum of information for the function if there isn't any.
self.ranges.entry(entry_pc).or_insert(raw::SourceLocation {
file_idx: u32::MAX,
line: 0,
function_idx,
inlined_into_idx: u32::MAX,
});
}
// We've processed all address ranges which are *not* covered by inlinees.
// Now it's time to recurse.
// Process our inlinees.
if!callee_call_locations.is_empty() {
for inlinee in &function.inlinees {
self.process_symbolic_function_recursive(inlinee, &callee_call_locations);
}
}
let function_end = function.end_address() as u32;
let last_addr = self.last_addr.get_or_insert(0);
if function_end > *last_addr {
*last_addr = function_end;
}
}
/// Processes an individual [`Symbol`].
pub fn process_symbolic_symbol(&mut self, symbol: &Symbol<'_>) {
let name_idx = {
let mut function = transform::Function {
name: match symbol.name {
Some(ref name) => name.clone(),
None => return,
},
comp_dir: None,
};
for transformer in &mut self.transformers.0 {
function = transformer.transform_function(function);
}
let function_name = if self.is_windows_object {
undecorate_win_symbol(&function.name)
} else {
&function.name
};
self.string_table.insert(function_name) as u32
};
match self.ranges.entry(symbol.address as u32) {
btree_map::Entry::Vacant(entry) => {
let function = raw::Function {
name_offset: name_idx,
_comp_dir_offset: u32::MAX,
entry_pc: symbol.address as u32,
lang: u32::MAX,
};
let function_idx = self.functions.insert_full(function).0 as u32;
entry.insert(raw::SourceLocation {
file_idx: u32::MAX,
line: 0,
function_idx,
inlined_into_idx: u32::MAX,
});
}
btree_map::Entry::Occupied(entry) => {
// ASSUMPTION:
// the `functions` iterator has already filled in this addr via debug session.
// we could trace the caller hierarchy up to the root, and assert that it is
// indeed the same function, and maybe update its `entry_pc`, but we don’t do
// that for now.
let _function_idx = entry.get().function_idx as usize;
}
}
let last_addr = self.last_addr.get_or_insert(0);
if symbol.address as u32 >= *last_addr {
self.last_addr = None;
}
}
// Methods for serializing to a [`Write`] below:
// Feel free to move these to a separate file.
/// Serialize the converted data.
///
/// This writes the SymCache binary format into the given [`Write`].
pub fn serialize<W: Write>(mut self, writer: &mut W) -> std::io::Result<()> {
| let num_ranges = self.ranges.len() as u32;
let string_bytes = self.string_table.into_bytes();
let header = raw::Header {
magic: raw::SYMCACHE_MAGIC,
version: crate::SYMCACHE_VERSION,
debug_id: self.debug_id,
arch: self.arch,
num_files,
num_functions,
num_source_locations,
num_ranges,
string_bytes: string_bytes.len() as u32,
_reserved: [0; 16],
};
writer.write_all(header.as_bytes())?;
writer.align_to(8)?;
for f in self. | let mut writer = Writer::new(writer);
// Insert a trailing sentinel source location in case we have a definite end addr
if let Some(last_addr) = self.last_addr {
// TODO: to be extra safe, we might check that `last_addr` is indeed larger than
// the largest range at some point.
match self.ranges.entry(last_addr) {
btree_map::Entry::Vacant(entry) => {
entry.insert(raw::NO_SOURCE_LOCATION);
}
btree_map::Entry::Occupied(_entry) => {
// BUG:
// the last addr should not map to an already defined range
}
}
}
let num_files = self.files.len() as u32;
let num_functions = self.functions.len() as u32;
let num_source_locations = (self.call_locations.len() + self.ranges.len()) as u32; | identifier_body |
writer.rs | `.
call_locations: IndexSet<raw::SourceLocation>,
/// A map from code ranges to the [`raw::SourceLocation`]s they correspond to.
///
/// Only the starting address of a range is saved, the end address is given implicitly
/// by the start address of the next range.
ranges: BTreeMap<u32, raw::SourceLocation>,
/// This is highest addr that we know is outside of a valid function.
/// Functions have an explicit end, while Symbols implicitly extend to infinity.
/// In case the highest addr belongs to a Symbol, this will be `None` and the SymCache
/// also extends to infinite, otherwise this is the end of the highest function.
last_addr: Option<u32>,
}
impl<'a> SymCacheConverter<'a> {
/// Creates a new Converter.
pub fn new() -> Self {
Self::default()
}
/// Adds a new [`transform::Transformer`] to this [`SymCacheConverter`].
///
/// Every [`transform::Function`] and [`transform::SourceLocation`] will be passed through
/// this transformer before it is written to the SymCache.
pub fn add_transformer<T>(&mut self, t: T)
where
T: transform::Transformer + 'a,
{
self.transformers.0.push(Box::new(t));
}
/// Sets the CPU architecture of this SymCache.
pub fn set_arch(&mut self, arch: Arch) {
self.arch = arch;
}
/// Sets the debug identifier of this SymCache.
pub fn set_debug_id(&mut self, debug_id: DebugId) {
self.debug_id = debug_id;
}
// Methods processing symbolic-debuginfo [`ObjectLike`] below:
// Feel free to move these to a separate file.
/// This processes the given [`ObjectLike`] object, collecting all its functions and line
/// information into the converter.
#[tracing::instrument(skip_all, fields(object.debug_id = %object.debug_id().breakpad()))]
pub fn process_object<'d, 'o, O>(&mut self, object: &'o O) -> Result<(), Error>
where
O: ObjectLike<'d, 'o>,
O::Error: std::error::Error + Send + Sync +'static,
{
let session = object
.debug_session()
.map_err(|e| Error::new(ErrorKind::BadDebugFile, e))?;
self.set_arch(object.arch());
self.set_debug_id(object.debug_id());
self.is_windows_object = matches!(object.file_format(), FileFormat::Pe | FileFormat::Pdb);
for function in session.functions() {
let function = function.map_err(|e| Error::new(ErrorKind::BadDebugFile, e))?;
self.process_symbolic_function(&function);
}
for symbol in object.symbols() {
self.process_symbolic_symbol(&symbol);
}
self.is_windows_object = false;
Ok(())
}
/// Processes an individual [`Function`], adding its line information to the converter.
pub fn process_symbolic_function(&mut self, function: &Function<'_>) {
self.process_symbolic_function_recursive(function, &[(0x0, u32::MAX)]);
}
/// Processes an individual [`Function`], adding its line information to the converter.
///
/// `call_locations` is a non-empty sorted list of `(address, call_location index)` pairs.
fn process_symbolic_function_recursive(
&mut self,
function: &Function<'_>,
call_locations: &[(u32, u32)],
) {
let string_table = &mut self.string_table;
// skip over empty functions or functions whose address is too large to fit in a u32
if function.size == 0 || function.address > u32::MAX as u64 {
return;
}
let comp_dir = std::str::from_utf8(function.compilation_dir).ok();
let entry_pc = if function.inline {
u32::MAX | let language = function.name.language();
let mut function = transform::Function {
name: function.name.as_str().into(),
comp_dir: comp_dir.map(Into::into),
};
for transformer in &mut self.transformers.0 {
function = transformer.transform_function(function);
}
let function_name = if self.is_windows_object {
undecorate_win_symbol(&function.name)
} else {
&function.name
};
let name_offset = string_table.insert(function_name) as u32;
let lang = language as u32;
let (fun_idx, _) = self.functions.insert_full(raw::Function {
name_offset,
_comp_dir_offset: u32::MAX,
entry_pc,
lang,
});
fun_idx as u32
};
// We can divide the instructions in a function into two buckets:
// (1) Instructions which are part of an inlined function call, and
// (2) instructions which are *not* part of an inlined function call.
//
// Our incoming line records cover both (1) and (2) types of instructions.
//
// Let's call the address ranges of these instructions (1) inlinee ranges and (2) self ranges.
//
// We use the following strategy: For each function, only insert that function's "self ranges"
// into `self.ranges`. Then recurse into the function's inlinees. Those will insert their
// own "self ranges". Once the entire tree has been traversed, `self.ranges` will contain
// entries from all levels.
//
// In order to compute this function's "self ranges", we first gather and sort its
// "inlinee ranges". Later, when we iterate over this function's lines, we will compute the
// "self ranges" from the gaps between the "inlinee ranges".
let mut inlinee_ranges = Vec::new();
for inlinee in &function.inlinees {
for line in &inlinee.lines {
let start = line.address as u32;
let end = (line.address + line.size.unwrap_or(1)) as u32;
inlinee_ranges.push(start..end);
}
}
inlinee_ranges.sort_unstable_by_key(|range| range.start);
// Walk three iterators. All of these are already sorted by address.
let mut line_iter = function.lines.iter();
let mut call_location_iter = call_locations.iter();
let mut inline_iter = inlinee_ranges.into_iter();
// call_locations is non-empty, so the first element always exists.
let mut current_call_location = call_location_iter.next().unwrap();
let mut next_call_location = call_location_iter.next();
let mut next_line = line_iter.next();
let mut next_inline = inline_iter.next();
// This will be the list we pass to our inlinees as the call_locations argument.
// This list is ordered by address by construction.
let mut callee_call_locations = Vec::new();
// Iterate over the line records.
while let Some(line) = next_line.take() {
let line_range_start = line.address as u32;
let line_range_end = (line.address + line.size.unwrap_or(1)) as u32;
// Find the call location for this line.
while next_call_location.is_some() && next_call_location.unwrap().0 <= line_range_start
{
current_call_location = next_call_location.unwrap();
next_call_location = call_location_iter.next();
}
let inlined_into_idx = current_call_location.1;
let mut location = transform::SourceLocation {
file: transform::File {
name: line.file.name_str(),
directory: Some(line.file.dir_str()),
comp_dir: comp_dir.map(Into::into),
},
line: line.line as u32,
};
for transformer in &mut self.transformers.0 {
location = transformer.transform_source_location(location);
}
let name_offset = string_table.insert(&location.file.name) as u32;
let directory_offset = location
.file
.directory
.map_or(u32::MAX, |d| string_table.insert(&d) as u32);
let comp_dir_offset = location
.file
.comp_dir
.map_or(u32::MAX, |cd| string_table.insert(&cd) as u32);
let (file_idx, _) = self.files.insert_full(raw::File {
name_offset,
directory_offset,
comp_dir_offset,
});
let source_location = raw::SourceLocation {
file_idx: file_idx as u32,
line: location.line,
function_idx,
inlined_into_idx,
};
// The current line can be a "self line", or a "call line", or even a mixture.
//
// Examples:
//
// a) Just self line:
// Line: |==============|
// Inlinee ranges: (none)
//
// Effect: insert_range
//
// b) Just call line:
// Line: |==============|
// Inlinee ranges: |--------------|
//
// Effect: make_call_location
//
// c) Just call line, for multiple inlined calls:
// Line: |==========================|
// Inlinee ranges: |----------||--------------|
//
// Effect: make_call_location, make_call_location
//
// d) Call line and trailing self line:
// Line: |==================|
// Inlinee ranges: |-----------|
//
// Effect: make_call_location, insert_range
//
// e) Leading self line and also call line:
// Line: |==================|
// Inlinee ranges: |-----------|
//
// Effect: insert_range, make_call_location
//
// f) Interleaving
// Line: |======================================|
// Inlinee ranges: |-----------| |-------|
//
// Effect: insert_range, make_call_location, insert_range, make_call_location, insert_range
//
// g) Bad debug info
// Line: |=======|
// Inlinee ranges: |-------------|
//
// Effect: make_call_location
let mut current_address = line_range_start;
while current_address < line_range_end {
// Emit our source location at current_address if current_address is not covered by an inlinee.
if next_inline.is_none() || next_inline.as_ref().unwrap().start > current_address {
// "insert_range"
self.ranges.insert(current_address, source_location.clone());
}
// If there is an inlinee range covered by this line record, turn this line into that
// call's "call line". Make a `call_location_idx` for it and store it in `callee_call_locations`.
if next_inline.is_some() && next_inline.as_ref().unwrap().start < line_range_end {
let inline_range = next_inline.take().unwrap();
// "make_call_location"
let (call_location_idx, _) =
self.call_locations.insert_full(source_location.clone());
callee_call_locations.push((inline_range.start, call_location_idx as u32));
// Advance current_address to the end of this inlinee range.
current_address = inline_range.end;
next_inline = inline_iter.next();
} else {
// No further inlinee ranges are overlapping with this line record. Advance to the
// end of the line record.
current_address = line_range_end;
}
}
// Advance the line iterator.
next_line = line_iter.next();
// Skip any lines that start before current_address.
// Such lines can exist if the debug information is faulty, or if the compiler created
// multiple identical small "call line" records instead of one combined record
// covering the entire inlinee range. We can't have different "call lines" for a single
// inlinee range anyway, so it's fine to skip these.
while next_line.is_some()
&& (next_line.as_ref().unwrap().address as u32) < current_address
{
next_line = line_iter.next();
}
}
if!function.inline {
// add the bare minimum of information for the function if there isn't any.
self.ranges.entry(entry_pc).or_insert(raw::SourceLocation {
file_idx: u32::MAX,
line: 0,
function_idx,
inlined_into_idx: u32::MAX,
});
}
// We've processed all address ranges which are *not* covered by inlinees.
// Now it's time to recurse.
// Process our inlinees.
if!callee_call_locations.is_empty() {
for inlinee in &function.inlinees {
self.process_symbolic_function_recursive(inlinee, &callee_call_locations);
}
}
let function_end = function.end_address() as u32;
let last_addr = self.last_addr.get_or_insert(0);
if function_end > *last_addr {
*last_addr = function_end;
}
}
/// Processes an individual [`Symbol`].
pub fn process_symbolic_symbol(&mut self, symbol: &Symbol<'_>) {
let name_idx = {
let mut function = transform::Function {
name: match symbol.name {
Some(ref name) => name.clone(),
None => return,
},
comp_dir: None,
};
for transformer in &mut self.transformers.0 {
function = transformer.transform_function(function);
}
let function_name = if self.is_windows_object {
undecorate_win_symbol(&function.name)
} else {
&function.name
};
self.string_table.insert(function_name) as u32
};
match self.ranges.entry(symbol.address as u32) {
btree_map::Entry::Vacant(entry) => {
let function = raw::Function {
name_offset: name_idx,
_comp_dir_offset: u32::MAX,
entry_pc: symbol.address as u32,
lang: u32::MAX,
};
let function_idx = self.functions.insert_full(function).0 as u32;
entry.insert(raw::SourceLocation {
file_idx: u32::MAX,
line: 0,
function_idx,
inlined_into_idx: u32::MAX,
});
}
btree_map::Entry::Occupied(entry) => {
// ASSUMPTION:
// the `functions` iterator has already filled in this addr via debug session.
// we could trace the caller hierarchy up to the root, and assert that it is
// indeed the same function, and maybe update its `entry_pc`, but we don’t do
// that for now.
let _function_idx = entry.get().function_idx as usize;
}
}
let last_addr = self.last_addr.get_or_insert(0);
if symbol.address as u32 >= *last_addr {
self.last_addr = None;
}
}
// Methods for serializing to a [`Write`] below:
// Feel free to move these to a separate file.
/// Serialize the converted data.
///
/// This writes the SymCache binary format into the given [`Write`].
pub fn serialize<W: Write>(mut self, writer: &mut W) -> std::io::Result<()> {
let mut writer = Writer::new(writer);
// Insert a trailing sentinel source location in case we have a definite end addr
if let Some(last_addr) = self.last_addr {
// TODO: to be extra safe, we might check that `last_addr` is indeed larger than
// the largest range at some point.
match self.ranges.entry(last_addr) {
btree_map::Entry::Vacant(entry) => {
entry.insert(raw::NO_SOURCE_LOCATION);
}
btree_map::Entry::Occupied(_entry) => {
// BUG:
// the last addr should not map to an already defined range
}
}
}
let num_files = self.files.len() as u32;
let num_functions = self.functions.len() as u32;
let num_source_locations = (self.call_locations.len() + self.ranges.len()) as u32;
let num_ranges = self.ranges.len() as u32;
let string_bytes = self.string_table.into_bytes();
let header = raw::Header {
magic: raw::SYMCACHE_MAGIC,
version: crate::SYMCACHE_VERSION,
debug_id: self.debug_id,
arch: self.arch,
num_files,
num_functions,
num_source_locations,
num_ranges,
string_bytes: string_bytes.len() as u32,
_reserved: [0; 16],
};
writer.write_all(header.as_bytes())?;
writer.align_to(8)?;
for f in self. | } else {
function.address as u32
};
let function_idx = { | random_line_split |
sup.rs | use super::util::{CacheKeyPath,
RemoteSup};
use crate::VERSION;
use configopt::{ConfigOptDefaults,
ConfigOptToString,
Partial};
use habitat_common::{cli::{RING_ENVVAR,
RING_KEY_ENVVAR},
types::{AutomateAuthToken,
EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
GossipListenAddr,
HttpListenAddr,
ListenCtlAddr}};
use habitat_core::{env::Config,
os::process::ShutdownTimeout,
package::PackageIdent,
service::HealthCheckInterval,
util::serde_string};
use rants::{error::Error as RantsError,
Address as NatsAddress};
use std::{fmt,
net::{Ipv4Addr,
SocketAddr},
path::PathBuf,
str::FromStr};
use structopt::{clap::AppSettings,
StructOpt};
use url::Url;
#[derive(StructOpt)]
#[structopt(name = "hab",
version = VERSION,
about = "The Habitat Supervisor",
author = "\nThe Habitat Maintainers <[email protected]>\n",
usage = "hab sup <SUBCOMMAND>",
global_settings = &[AppSettings::VersionlessSubcommands],
)]
#[allow(clippy::large_enum_variant)]
pub enum Sup {
/// Start an interactive Bash-like shell
#[structopt(usage = "hab sup bash", no_version)]
Bash,
/// Depart a Supervisor from the gossip ring; kicking and banning the target from joining again
/// with the same member-id
#[structopt(no_version)]
Depart {
/// The member-id of the Supervisor to depart
#[structopt(name = "MEMBER_ID")]
member_id: String,
#[structopt(flatten)]
remote_sup: RemoteSup,
},
/// Run the Habitat Supervisor
#[structopt(no_version)]
Run(SupRun),
#[structopt(no_version)]
Secret(Secret),
/// Start an interactive Bourne-like shell
#[structopt(usage = "hab sup sh", no_version)]
Sh,
/// Query the status of Habitat services
#[structopt(no_version)]
Status {
/// A package identifier (ex: core/redis, core/busybox-static/1.42.2)
#[structopt(name = "PKG_IDENT")]
pkg_ident: Option<PackageIdent>,
#[structopt(flatten)]
remote_sup: RemoteSup,
},
/// Gracefully terminate the Habitat Supervisor and all of its running services
#[structopt(usage = "hab sup term [OPTIONS]", no_version)]
Term,
}
// TODO (DM): This is unnecessarily difficult due to the orphan rule and the lack of specialization.
// The `configopt` library could be improved to make this easier.
#[derive(Deserialize, Serialize, Debug)]
struct | (#[serde(with = "serde_string")] NatsAddress);
impl fmt::Display for EventStreamAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) }
}
impl FromStr for EventStreamAddress {
type Err = RantsError;
fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(EventStreamAddress(s.parse()?)) }
}
impl ConfigOptToString for EventStreamAddress {}
#[derive(ConfigOptDefaults, Partial, StructOpt, Deserialize)]
#[configopt_defaults(type = "PartialSupRun")]
#[partial(derive(Debug, Default, Deserialize), attrs(serde))]
#[serde(deny_unknown_fields)]
#[structopt(name = "run",
no_version,
about = "Run the Habitat Supervisor",
// set custom usage string, otherwise the binary
// is displayed confusingly as `hab-sup`
// see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
usage = "hab sup run [FLAGS] [OPTIONS] [--] [PKG_IDENT_OR_ARTIFACT]"
)]
#[allow(dead_code)]
pub struct SupRun {
/// The listen address for the Gossip System Gateway
#[structopt(name = "LISTEN_GOSSIP",
long = "listen-gossip",
env = GossipListenAddr::ENVVAR,
default_value = GossipListenAddr::default_as_str())]
listen_gossip: SocketAddr,
/// Start the supervisor in local mode
#[structopt(name = "LOCAL_GOSSIP_MODE",
long = "local-gossip-mode",
conflicts_with_all = &["LISTEN_GOSSIP", "PEER", "PEER_WATCH_FILE"])]
local_gossip_mode: bool,
/// The listen address for the HTTP Gateway
#[structopt(name = "LISTEN_HTTP",
long = "listen-http",
env = HttpListenAddr::ENVVAR,
default_value = HttpListenAddr::default_as_str())]
listen_http: SocketAddr,
/// Disable the HTTP Gateway completely
#[structopt(name = "HTTP_DISABLE", long = "http-disable", short = "D")]
http_disable: bool,
/// The listen address for the Control Gateway. If not specified, the value will be taken from
/// the HAB_LISTEN_CTL environment variable if defined
#[structopt(name = "LISTEN_CTL",
long = "listen-ctl",
env = ListenCtlAddr::ENVVAR,
default_value = ListenCtlAddr::default_as_str())]
listen_ctl: SocketAddr,
/// The organization that the Supervisor and its subsequent services are part of
#[structopt(name = "ORGANIZATION", long = "org")]
organization: Option<String>,
/// The listen address of one or more initial peers (IP[:PORT])
#[structopt(name = "PEER", long = "peer")]
// TODO (DM): This could probably be a different type for better validation (Vec<SockAddr>?)
peer: Vec<String>,
/// If this Supervisor is a permanent peer
#[structopt(name = "PERMANENT_PEER", long = "permanent-peer", short = "I")]
permanent_peer: bool,
/// Watch this file for connecting to the ring
#[structopt(name = "PEER_WATCH_FILE",
long = "peer-watch-file",
conflicts_with = "PEER")]
peer_watch_file: PathBuf,
#[structopt(flatten)]
cache_key_path: CacheKeyPath,
/// The name of the ring used by the Supervisor when running with wire encryption. (ex: hab sup
/// run --ring myring)
#[structopt(name = "RING",
long = "ring",
short = "r",
env = RING_ENVVAR,
conflicts_with = "RING_KEY")]
ring: String,
/// The contents of the ring key when running with wire encryption. (Note: This option is
/// explicitly undocumented and for testing purposes only. Do not use it in a production
/// system. Use the corresponding environment variable instead.) (ex: hab sup run --ring-key
/// 'SYM-SEC-1 foo-20181113185935GCrBOW6CCN75LMl0j2V5QqQ6nNzWm6and9hkKBSUFPI=')
#[structopt(name = "RING_KEY",
long = "ring-key",
env = RING_KEY_ENVVAR,
hidden = true,
conflicts_with = "RING")]
ring_key: Option<String>,
/// Receive Supervisor updates from the specified release channel
#[structopt(name = "CHANNEL", long = "channel", default_value = "stable")]
channel: String,
/// Specify an alternate Builder endpoint. If not specified, the value will be taken from the
/// HAB_BLDR_URL environment variable if defined (default: https://bldr.habitat.sh)
#[structopt(name = "BLDR_URL",
long = "url",
short = "u",
// TODO (DM): These fields are not actual set in the clap macro but I think they should
// env = BLDR_URL_ENVVAR,
// default_value = DEFAULT_BLDR_URL
)]
bldr_url: Url,
/// Use package config from this path, rather than the package itself
#[structopt(name = "CONFIG_DIR", long = "config-from")]
config_dir: Option<PathBuf>,
/// Enable automatic updates for the Supervisor itself
#[structopt(name = "AUTO_UPDATE", long = "auto-update", short = "A")]
auto_update: bool,
/// Used for enabling TLS for the HTTP gateway. Read private key from KEY_FILE. This should be
/// a RSA private key or PKCS8-encoded private key, in PEM format
#[structopt(name = "KEY_FILE", long = "key", requires = "CERT_FILE")]
key_file: Option<PathBuf>,
/// Used for enabling TLS for the HTTP gateway. Read server certificates from CERT_FILE. This
/// should contain PEM-format certificates in the right order (the first certificate should
/// certify KEY_FILE, the last should be a root CA)
#[structopt(name = "CERT_FILE", long = "certs", requires = "KEY_FILE")]
cert_file: Option<PathBuf>,
/// Used for enabling client-authentication with TLS for the HTTP gateway. Read CA certificate
/// from CA_CERT_FILE. This should contain PEM-format certificate that can be used to validate
/// client requests
#[structopt(name = "CA_CERT_FILE",
long = "ca-certs",
requires_all = &["CERT_FILE", "KEY_FILE"])]
ca_cert_file: Option<PathBuf>,
/// Load the given Habitat package as part of the Supervisor startup specified by a package
/// identifier (ex: core/redis) or filepath to a Habitat Artifact (ex:
/// /home/core-redis-3.0.7-21120102031201-x86_64-linux.hart)
// TODO (DM): We could probably do better validation here
#[structopt(name = "PKG_IDENT_OR_ARTIFACT")]
pkg_ident_or_artifact: Option<String>,
// TODO (DM): This flag can eventually be removed.
// See https://github.com/habitat-sh/habitat/issues/7339
#[structopt(name = "APPLICATION", long = "application", hidden = true)]
application: Vec<String>,
// TODO (DM): This flag can eventually be removed.
// See https://github.com/habitat-sh/habitat/issues/7339
#[structopt(name = "ENVIRONMENT", long = "environment", hidden = true)]
environment: Vec<String>,
/// The service group; shared config and topology [default: default]
// TODO (DM): This should set a default value
#[structopt(name = "GROUP", long = "group")]
group: String,
/// Service topology; [default: none]
// TODO (DM): I dont think saying the default is none makes sense here
#[structopt(name = "TOPOLOGY",
long = "topology",
short = "t",
possible_values = &["standalone", "leader"])]
topology: Option<habitat_sup_protocol::types::Topology>,
/// The update strategy; [default: none] [values: none, at-once, rolling]
// TODO (DM): this should set a default_value and use possible_values = &["none", "at-once",
// "rolling"]
#[structopt(name = "STRATEGY", long = "strategy", short = "s")]
strategy: Option<habitat_sup_protocol::types::UpdateStrategy>,
/// One or more service groups to bind to a configuration
#[structopt(name = "BIND", long = "bind")]
bind: Vec<String>,
/// Governs how the presence or absence of binds affects service startup. `strict` blocks
/// startup until all binds are present. [default: strict] [values: relaxed, strict]
// TODO (DM): This should set default_value and use possible_values
#[structopt(name = "BINDING_MODE", long = "binding-mode")]
binding_mode: Option<habitat_sup_protocol::types::BindingMode>,
/// Verbose output; shows file and line/column numbers
#[structopt(name = "VERBOSE", short = "v")]
verbose: bool,
/// Turn ANSI color off
#[structopt(name = "NO_COLOR", long = "no-color")]
no_color: bool,
/// Use structured JSON logging for the Supervisor. Implies NO_COLOR
#[structopt(name = "JSON", long = "json-logging")]
json_logging: bool,
/// The interval (seconds) on which to run health checks [default: 30]
// TODO (DM): Should use default_value = "30"
#[structopt(name = "HEALTH_CHECK_INTERVAL",
long = "health-check-interval",
short = "i")]
health_check_interval: HealthCheckInterval,
/// The IPv4 address to use as the `sys.ip` template variable. If this argument is not set, the
/// supervisor tries to dynamically determine an IP address. If that fails, the supervisor
/// defaults to using `127.0.0.1`
#[structopt(name = "SYS_IP_ADDRESS", long = "sys-ip-address")]
sys_ip_address: Option<Ipv4Addr>,
/// The name of the application for event stream purposes. This will be attached to all events
/// generated by this Supervisor
#[structopt(name = "EVENT_STREAM_APPLICATION", long = "event-stream-application")]
event_stream_application: String,
/// The name of the environment for event stream purposes. This will be attached to all events
/// generated by this Supervisor
#[structopt(name = "EVENT_STREAM_ENVIRONMENT", long = "event-stream-environment")]
event_stream_environment: Option<String>,
/// How long in seconds to wait for an event stream connection before exiting the Supervisor.
/// Set to '0' to immediately start the Supervisor and continue running regardless of the
/// initial connection status
#[structopt(name = "EVENT_STREAM_CONNECT_TIMEOUT",
long = "event-stream-connect-timeout",
default_value = "0",
env = EventStreamConnectMethod::ENVVAR)]
event_stream_connect_timeout: u64,
/// The event stream connection string (host:port) used by this Supervisor to send events to
/// Chef Automate. This enables the event stream and requires --event-stream-application,
/// --event-stream-environment, and --event-stream-token also be set
#[structopt(name = "EVENT_STREAM_URL",
long = "event-stream-url",
requires_all = &["EVENT_STREAM_APPLICATION",
"EVENT_STREAM_ENVIRONMENT",
AutomateAuthToken::ARG_NAME])]
event_stream_url: Option<EventStreamAddress>,
/// The name of the site where this Supervisor is running for event stream purposes
#[structopt(name = "EVENT_STREAM_SITE", long = "event-stream-site")]
event_stream_site: Option<String>,
/// The authentication token for connecting the event stream to Chef Automate
#[structopt(name = "EVENT_STREAM_TOKEN",
long = "event-stream-token",
env = AutomateAuthToken::ENVVAR,
validator = AutomateAuthToken::validate)]
automate_auth_token: Option<String>,
/// An arbitrary key-value pair to add to each event generated by this Supervisor
#[structopt(name = "EVENT_STREAM_METADATA",
long = "event-meta",
validator = EventStreamMetadata::validate)]
event_meta: Vec<String>,
/// The path to Chef Automate's event stream certificate in PEM format used to establish a TLS
/// connection
#[structopt(name = "EVENT_STREAM_SERVER_CERTIFICATE",
long = "event-stream-server-certificate",
validator = EventStreamServerCertificate::validate)]
event_stream_server_certificate: Option<String>,
/// The number of seconds after sending a shutdown signal to wait before killing a service
/// process (default: set in plan)
#[structopt(name = "SHUTDOWN_TIMEOUT", long = "shutdown-timeout")]
shutdown_timeout: ShutdownTimeout,
}
#[derive(StructOpt)]
#[structopt(no_version)]
/// Commands relating to a Habitat Supervisor's Control Gateway secret
pub enum Secret {
/// Generate a secret key to use as a Supervisor's Control Gateway secret
Generate,
}
| EventStreamAddress | identifier_name |
sup.rs | use super::util::{CacheKeyPath,
RemoteSup};
use crate::VERSION;
use configopt::{ConfigOptDefaults,
ConfigOptToString,
Partial};
use habitat_common::{cli::{RING_ENVVAR,
RING_KEY_ENVVAR},
types::{AutomateAuthToken,
EventStreamConnectMethod,
EventStreamMetadata,
EventStreamServerCertificate,
GossipListenAddr,
HttpListenAddr,
ListenCtlAddr}};
use habitat_core::{env::Config,
os::process::ShutdownTimeout,
package::PackageIdent,
service::HealthCheckInterval,
util::serde_string};
use rants::{error::Error as RantsError,
Address as NatsAddress};
use std::{fmt,
net::{Ipv4Addr,
SocketAddr},
path::PathBuf,
str::FromStr};
use structopt::{clap::AppSettings,
StructOpt};
use url::Url;
#[derive(StructOpt)]
#[structopt(name = "hab",
version = VERSION,
about = "The Habitat Supervisor",
author = "\nThe Habitat Maintainers <[email protected]>\n",
usage = "hab sup <SUBCOMMAND>",
global_settings = &[AppSettings::VersionlessSubcommands],
)]
#[allow(clippy::large_enum_variant)]
pub enum Sup {
/// Start an interactive Bash-like shell
#[structopt(usage = "hab sup bash", no_version)]
Bash,
/// Depart a Supervisor from the gossip ring; kicking and banning the target from joining again
/// with the same member-id
#[structopt(no_version)]
Depart {
/// The member-id of the Supervisor to depart
#[structopt(name = "MEMBER_ID")]
member_id: String,
#[structopt(flatten)]
remote_sup: RemoteSup,
},
/// Run the Habitat Supervisor
#[structopt(no_version)]
Run(SupRun),
#[structopt(no_version)]
Secret(Secret),
/// Start an interactive Bourne-like shell
#[structopt(usage = "hab sup sh", no_version)]
Sh,
/// Query the status of Habitat services
#[structopt(no_version)]
Status {
/// A package identifier (ex: core/redis, core/busybox-static/1.42.2)
#[structopt(name = "PKG_IDENT")]
pkg_ident: Option<PackageIdent>,
#[structopt(flatten)]
remote_sup: RemoteSup,
},
/// Gracefully terminate the Habitat Supervisor and all of its running services
#[structopt(usage = "hab sup term [OPTIONS]", no_version)]
Term,
}
// TODO (DM): This is unnecessarily difficult due to the orphan rule and the lack of specialization.
// The `configopt` library could be improved to make this easier.
#[derive(Deserialize, Serialize, Debug)]
struct EventStreamAddress(#[serde(with = "serde_string")] NatsAddress);
impl fmt::Display for EventStreamAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) }
}
impl FromStr for EventStreamAddress {
type Err = RantsError;
fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(EventStreamAddress(s.parse()?)) }
}
impl ConfigOptToString for EventStreamAddress {}
#[derive(ConfigOptDefaults, Partial, StructOpt, Deserialize)]
#[configopt_defaults(type = "PartialSupRun")]
#[partial(derive(Debug, Default, Deserialize), attrs(serde))]
#[serde(deny_unknown_fields)]
#[structopt(name = "run",
no_version,
about = "Run the Habitat Supervisor",
// set custom usage string, otherwise the binary
// is displayed confusingly as `hab-sup`
// see: https://github.com/kbknapp/clap-rs/blob/2724ec5399c500b12a1a24d356f4090f4816f5e2/src/app/mod.rs#L373-L394
usage = "hab sup run [FLAGS] [OPTIONS] [--] [PKG_IDENT_OR_ARTIFACT]"
)]
#[allow(dead_code)]
pub struct SupRun {
/// The listen address for the Gossip System Gateway
#[structopt(name = "LISTEN_GOSSIP",
long = "listen-gossip",
env = GossipListenAddr::ENVVAR,
default_value = GossipListenAddr::default_as_str())]
listen_gossip: SocketAddr,
/// Start the supervisor in local mode
#[structopt(name = "LOCAL_GOSSIP_MODE",
long = "local-gossip-mode",
conflicts_with_all = &["LISTEN_GOSSIP", "PEER", "PEER_WATCH_FILE"])]
local_gossip_mode: bool,
/// The listen address for the HTTP Gateway
#[structopt(name = "LISTEN_HTTP",
long = "listen-http",
env = HttpListenAddr::ENVVAR,
default_value = HttpListenAddr::default_as_str())]
listen_http: SocketAddr,
/// Disable the HTTP Gateway completely
#[structopt(name = "HTTP_DISABLE", long = "http-disable", short = "D")]
http_disable: bool,
/// The listen address for the Control Gateway. If not specified, the value will be taken from
/// the HAB_LISTEN_CTL environment variable if defined
#[structopt(name = "LISTEN_CTL",
long = "listen-ctl",
env = ListenCtlAddr::ENVVAR,
default_value = ListenCtlAddr::default_as_str())]
listen_ctl: SocketAddr,
/// The organization that the Supervisor and its subsequent services are part of
#[structopt(name = "ORGANIZATION", long = "org")]
organization: Option<String>,
/// The listen address of one or more initial peers (IP[:PORT])
#[structopt(name = "PEER", long = "peer")]
// TODO (DM): This could probably be a different type for better validation (Vec<SockAddr>?)
peer: Vec<String>,
/// If this Supervisor is a permanent peer
#[structopt(name = "PERMANENT_PEER", long = "permanent-peer", short = "I")]
permanent_peer: bool,
/// Watch this file for connecting to the ring
#[structopt(name = "PEER_WATCH_FILE",
long = "peer-watch-file",
conflicts_with = "PEER")]
peer_watch_file: PathBuf, | /// run --ring myring)
#[structopt(name = "RING",
long = "ring",
short = "r",
env = RING_ENVVAR,
conflicts_with = "RING_KEY")]
ring: String,
/// The contents of the ring key when running with wire encryption. (Note: This option is
/// explicitly undocumented and for testing purposes only. Do not use it in a production
/// system. Use the corresponding environment variable instead.) (ex: hab sup run --ring-key
/// 'SYM-SEC-1 foo-20181113185935GCrBOW6CCN75LMl0j2V5QqQ6nNzWm6and9hkKBSUFPI=')
#[structopt(name = "RING_KEY",
long = "ring-key",
env = RING_KEY_ENVVAR,
hidden = true,
conflicts_with = "RING")]
ring_key: Option<String>,
/// Receive Supervisor updates from the specified release channel
#[structopt(name = "CHANNEL", long = "channel", default_value = "stable")]
channel: String,
/// Specify an alternate Builder endpoint. If not specified, the value will be taken from the
/// HAB_BLDR_URL environment variable if defined (default: https://bldr.habitat.sh)
#[structopt(name = "BLDR_URL",
long = "url",
short = "u",
// TODO (DM): These fields are not actual set in the clap macro but I think they should
// env = BLDR_URL_ENVVAR,
// default_value = DEFAULT_BLDR_URL
)]
bldr_url: Url,
/// Use package config from this path, rather than the package itself
#[structopt(name = "CONFIG_DIR", long = "config-from")]
config_dir: Option<PathBuf>,
/// Enable automatic updates for the Supervisor itself
#[structopt(name = "AUTO_UPDATE", long = "auto-update", short = "A")]
auto_update: bool,
/// Used for enabling TLS for the HTTP gateway. Read private key from KEY_FILE. This should be
/// a RSA private key or PKCS8-encoded private key, in PEM format
#[structopt(name = "KEY_FILE", long = "key", requires = "CERT_FILE")]
key_file: Option<PathBuf>,
/// Used for enabling TLS for the HTTP gateway. Read server certificates from CERT_FILE. This
/// should contain PEM-format certificates in the right order (the first certificate should
/// certify KEY_FILE, the last should be a root CA)
#[structopt(name = "CERT_FILE", long = "certs", requires = "KEY_FILE")]
cert_file: Option<PathBuf>,
/// Used for enabling client-authentication with TLS for the HTTP gateway. Read CA certificate
/// from CA_CERT_FILE. This should contain PEM-format certificate that can be used to validate
/// client requests
#[structopt(name = "CA_CERT_FILE",
long = "ca-certs",
requires_all = &["CERT_FILE", "KEY_FILE"])]
ca_cert_file: Option<PathBuf>,
/// Load the given Habitat package as part of the Supervisor startup specified by a package
/// identifier (ex: core/redis) or filepath to a Habitat Artifact (ex:
/// /home/core-redis-3.0.7-21120102031201-x86_64-linux.hart)
// TODO (DM): We could probably do better validation here
#[structopt(name = "PKG_IDENT_OR_ARTIFACT")]
pkg_ident_or_artifact: Option<String>,
// TODO (DM): This flag can eventually be removed.
// See https://github.com/habitat-sh/habitat/issues/7339
#[structopt(name = "APPLICATION", long = "application", hidden = true)]
application: Vec<String>,
// TODO (DM): This flag can eventually be removed.
// See https://github.com/habitat-sh/habitat/issues/7339
#[structopt(name = "ENVIRONMENT", long = "environment", hidden = true)]
environment: Vec<String>,
/// The service group; shared config and topology [default: default]
// TODO (DM): This should set a default value
#[structopt(name = "GROUP", long = "group")]
group: String,
/// Service topology; [default: none]
// TODO (DM): I dont think saying the default is none makes sense here
#[structopt(name = "TOPOLOGY",
long = "topology",
short = "t",
possible_values = &["standalone", "leader"])]
topology: Option<habitat_sup_protocol::types::Topology>,
/// The update strategy; [default: none] [values: none, at-once, rolling]
// TODO (DM): this should set a default_value and use possible_values = &["none", "at-once",
// "rolling"]
#[structopt(name = "STRATEGY", long = "strategy", short = "s")]
strategy: Option<habitat_sup_protocol::types::UpdateStrategy>,
/// One or more service groups to bind to a configuration
#[structopt(name = "BIND", long = "bind")]
bind: Vec<String>,
/// Governs how the presence or absence of binds affects service startup. `strict` blocks
/// startup until all binds are present. [default: strict] [values: relaxed, strict]
// TODO (DM): This should set default_value and use possible_values
#[structopt(name = "BINDING_MODE", long = "binding-mode")]
binding_mode: Option<habitat_sup_protocol::types::BindingMode>,
/// Verbose output; shows file and line/column numbers
#[structopt(name = "VERBOSE", short = "v")]
verbose: bool,
/// Turn ANSI color off
#[structopt(name = "NO_COLOR", long = "no-color")]
no_color: bool,
/// Use structured JSON logging for the Supervisor. Implies NO_COLOR
#[structopt(name = "JSON", long = "json-logging")]
json_logging: bool,
/// The interval (seconds) on which to run health checks [default: 30]
// TODO (DM): Should use default_value = "30"
#[structopt(name = "HEALTH_CHECK_INTERVAL",
long = "health-check-interval",
short = "i")]
health_check_interval: HealthCheckInterval,
/// The IPv4 address to use as the `sys.ip` template variable. If this argument is not set, the
/// supervisor tries to dynamically determine an IP address. If that fails, the supervisor
/// defaults to using `127.0.0.1`
#[structopt(name = "SYS_IP_ADDRESS", long = "sys-ip-address")]
sys_ip_address: Option<Ipv4Addr>,
/// The name of the application for event stream purposes. This will be attached to all events
/// generated by this Supervisor
#[structopt(name = "EVENT_STREAM_APPLICATION", long = "event-stream-application")]
event_stream_application: String,
/// The name of the environment for event stream purposes. This will be attached to all events
/// generated by this Supervisor
#[structopt(name = "EVENT_STREAM_ENVIRONMENT", long = "event-stream-environment")]
event_stream_environment: Option<String>,
/// How long in seconds to wait for an event stream connection before exiting the Supervisor.
/// Set to '0' to immediately start the Supervisor and continue running regardless of the
/// initial connection status
#[structopt(name = "EVENT_STREAM_CONNECT_TIMEOUT",
long = "event-stream-connect-timeout",
default_value = "0",
env = EventStreamConnectMethod::ENVVAR)]
event_stream_connect_timeout: u64,
/// The event stream connection string (host:port) used by this Supervisor to send events to
/// Chef Automate. This enables the event stream and requires --event-stream-application,
/// --event-stream-environment, and --event-stream-token also be set
#[structopt(name = "EVENT_STREAM_URL",
long = "event-stream-url",
requires_all = &["EVENT_STREAM_APPLICATION",
"EVENT_STREAM_ENVIRONMENT",
AutomateAuthToken::ARG_NAME])]
event_stream_url: Option<EventStreamAddress>,
/// The name of the site where this Supervisor is running for event stream purposes
#[structopt(name = "EVENT_STREAM_SITE", long = "event-stream-site")]
event_stream_site: Option<String>,
/// The authentication token for connecting the event stream to Chef Automate
#[structopt(name = "EVENT_STREAM_TOKEN",
long = "event-stream-token",
env = AutomateAuthToken::ENVVAR,
validator = AutomateAuthToken::validate)]
automate_auth_token: Option<String>,
/// An arbitrary key-value pair to add to each event generated by this Supervisor
#[structopt(name = "EVENT_STREAM_METADATA",
long = "event-meta",
validator = EventStreamMetadata::validate)]
event_meta: Vec<String>,
/// The path to Chef Automate's event stream certificate in PEM format used to establish a TLS
/// connection
#[structopt(name = "EVENT_STREAM_SERVER_CERTIFICATE",
long = "event-stream-server-certificate",
validator = EventStreamServerCertificate::validate)]
event_stream_server_certificate: Option<String>,
/// The number of seconds after sending a shutdown signal to wait before killing a service
/// process (default: set in plan)
#[structopt(name = "SHUTDOWN_TIMEOUT", long = "shutdown-timeout")]
shutdown_timeout: ShutdownTimeout,
}
#[derive(StructOpt)]
#[structopt(no_version)]
/// Commands relating to a Habitat Supervisor's Control Gateway secret
pub enum Secret {
/// Generate a secret key to use as a Supervisor's Control Gateway secret
Generate,
} | #[structopt(flatten)]
cache_key_path: CacheKeyPath,
/// The name of the ring used by the Supervisor when running with wire encryption. (ex: hab sup | random_line_split |
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self {
Self { su, sv, du, dv }
}
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get(), self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn new(vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1)!= 0 { -u } else { u }) + (if (h & 2)!= 0 | else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp(
smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs());
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | { -v } | conditional_block |
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self {
Self { su, sv, du, dv }
}
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get(), self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn new(vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1)!= 0 { -u } else { u }) + (if (h & 2)!= 0 { -v } else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp( |
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs()); | random_line_split |
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self {
Self { su, sv, du, dv }
}
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get(), self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn | (vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1)!= 0 { -u } else { u }) + (if (h & 2)!= 0 { -v } else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp(
smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs());
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | new | identifier_name |
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self |
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get(), self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn new(vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1)!= 0 { -u } else { u }) + (if (h & 2)!= 0 { -v } else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp(
smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs());
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | {
Self { su, sv, du, dv }
} | identifier_body |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => {
let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap();
let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote!= prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >=? and id <=?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize)!= deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
/// Prune and compact old database backups in `directory`.
///
/// Keeps every backup from the current month plus the first-of-month
/// backups of the current and previous year. When a year has all 12
/// first-of-month backups (and `skip` is false) they are compacted into a
/// single `db-backup-<year>.tar.zst` and scheduled for deletion. Files are
/// only actually removed when `delete` is true (see `print_delete`).
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
    let now: DateTime<Local> = Local::now();
    let year = now.year();
    let month = now.month();
    // Fetch a set of all of the files
    let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
    // Fetch a set of all of the file in the current month+year
    let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
    // Fetch a set of all of the file that is in previous year + first of the month
    let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
    // Fetch a set of all of the file that is in current year + first of the month
    let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
    // Calculate the initial set of files to prune.
    // BUG FIX: the keep-list references were mangled ("¤t_…" is the
    // HTML entity `&curren;` eating "&curren" from "&current"); restored
    // `&current_month_year` / `&current_first_month`.
    let mut delete_files = delete_set(
        &all_files,
        vec![&current_month_year, &previous_first_month, &current_first_month],
    );
    // Compact previous/current first-of-month sets into their years.
    if skip {
        println!("Compacting: Skipped");
    } else {
        if previous_first_month.len() == 12 {
            let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year - 1);
            print_compact(&previous_first_month, &tarfile)?;
            delete_files.extend(previous_first_month.iter().cloned());
        }
        if current_first_month.len() == 12 {
            let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
            print_compact(&current_first_month, &tarfile)?;
            delete_files.extend(current_first_month.iter().cloned());
        }
    }
    // List the files we are going to delete
    print_delete(&delete_files, delete)?;
    Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
}
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn | (compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete {
std::fs::remove_file(path)?;
}
}
Ok(())
}
| compact | identifier_name |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
/// Entry point: parses the CLI and dispatches to the `runs` or `prune`
/// subcommand; any returned error aborts via the final `unwrap`.
fn main() {
    let matches = Command::new("Karmator maintance batch")
        .version(crate_version!())
        .author(crate_authors!("\n"))
        .about("Handles the maintance work for karmator")
        .subcommand(
            Command::new("runs")
                .about("Detect runs of votes")
                .arg(
                    Arg::new("min")
                        .short('m')
                        .help("Min count of runs before outputting")
                        .default_value("20")
                        .value_parser(value_parser!(u32).range(1..)),
                )
                .arg(
                    Arg::new("delete")
                        .long("delete")
                        .help("Delete the runs detected")
                        .action(ArgAction::SetTrue),
                )
                .arg(
                    Arg::new("FILE")
                        .help("Database file to operate on")
                        .required(true),
                ),
        )
        .subcommand(
            Command::new("prune")
                .about("Prune and pack up old backups")
                .arg(
                    Arg::new("delete")
                        .long("delete")
                        .help("Delete the old files")
                        .action(ArgAction::SetTrue),
                )
                .arg(
                    Arg::new("skip")
                        .long("skip")
                        .help("Skip compacting old files")
                        .action(ArgAction::SetTrue),
                )
                .arg(
                    Arg::new("BACKUPS")
                        .help("Backup directory to prune")
                        .required(true),
                ),
        )
        .get_matches();
    match matches.subcommand() {
        Some(("runs", m)) => {
            let filename = m.get_one::<String>("FILE").unwrap();
            let min = m.get_one::<u32>("min").unwrap();
            // BUG FIX: `contains_id` is always true for `ArgAction::SetTrue`
            // flags (they carry an implicit default value), so `--delete`
            // was treated as always set. `get_flag` reports the actual value.
            let delete = m.get_flag("delete");
            run(filename, *min, delete)
        },
        Some(("prune", m)) => {
            let directory = m.get_one::<String>("BACKUPS").unwrap();
            // Same `contains_id` -> `get_flag` fix as above for both flags.
            let delete = m.get_flag("delete");
            let skip = m.get_flag("skip");
            prune(directory, delete, skip)
        }
        _ => {
            println!("meh do --help yourself");
            Ok(())
        },
    }.unwrap();
}
/// A single vote row from the `votes` table.
#[derive(Debug, Clone)]
struct Vote {
    // Row id; used to delimit runs when deleting by id range.
    id: i32,
    // Who cast the vote.
    by_whom_name: String,
    // What/whom the vote targets.
    for_what_name: String,
    // -1 = Down, 0 = Side, 1 = Up (see `str_amount`).
    amount: i8,
}
impl PartialEq for Vote {
    /// Two votes are "equal" for run-detection purposes when voter, target
    /// and amount all match; `id` is deliberately ignored so consecutive
    /// rows with different ids can still form a run.
    fn eq(&self, other: &Self) -> bool {
        self.by_whom_name == other.by_whom_name
            && self.for_what_name == other.for_what_name
            && self.amount == other.amount
    }
}
/// Summary of one detected run of identical votes.
#[derive(Debug)]
struct RunVal {
    // Id of the first vote in the run.
    oldest_id: i32,
    // Id of the last vote in the run.
    newest_id: i32,
    // Voter shared by every vote in the run.
    by_whom_name: String,
    // Target shared by every vote in the run.
    for_what_name: String,
    // Amount shared by every vote in the run (-1/0/1).
    amount: i8,
    // Number of votes in the run.
    count: u32,
}
/// Build the summary record for a completed run: the id span goes from the
/// run's first vote (`srv`) to its last (`pv`); the identity fields come
/// from the first vote (all votes in a run compare equal anyway).
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
    let oldest_id = srv.id;
    let newest_id = pv.id;
    RunVal {
        oldest_id,
        newest_id,
        by_whom_name: srv.by_whom_name.clone(),
        for_what_name: srv.for_what_name.clone(),
        amount: srv.amount,
        count,
    }
}
/// Human-readable label for a vote amount.
/// Panics on anything other than -1, 0 or 1 (corrupt data).
fn str_amount(amount: i8) -> &'static str {
    if amount == -1 {
        "Down"
    } else if amount == 0 {
        "Side"
    } else if amount == 1 {
        "Up"
    } else {
        panic!("invalid amount")
    }
}
/// Detect "runs" of identical votes in the sqlite database at `filename`.
///
/// A run is a maximal sequence of consecutive rows (in SELECT order) whose
/// voter, target and amount all match (`Vote::eq` ignores `id`). Runs
/// longer than `min` are printed, or deleted from the table when `delete`
/// is set.
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
    let conn =
        rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
            .expect(&format!("Connection error: {}", filename));
    let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
    let vote_iter = stmt.query_map(rs::params![], |row| {
        Ok(Vote {
            id: row.get(0)?,
            by_whom_name: row.get(1)?,
            for_what_name: row.get(2)?,
            amount: row.get(3)?,
        })
    })?;
    // Time to compute the run
    // State machine: `start_run_vote` is the first vote of the current run,
    // `prev_vote` the most recent one, `count` the run length so far.
    let mut runs = Vec::new();
    let mut start_run_vote = None;
    let mut prev_vote = None;
    let mut count = 0;
    for rvote in vote_iter {
        let vote = rvote?;
        match (&start_run_vote, &prev_vote) {
            (None, None) => {
                // First row: open the initial run.
                start_run_vote = Some(vote.clone());
                prev_vote = Some(vote);
                count = 1; // Run of 1
            }
            (Some(srv), Some(pv)) => {
                if pv == &vote {
                    // Current vote + prev vote are the same, inc prev vote
                    prev_vote = Some(vote);
                    count += 1;
                } else {
                    // Current vote!= prev vote, record the run, and reset
                    runs.push(get_run_val(srv, pv, count));
                    start_run_vote = Some(vote.clone());
                    prev_vote = Some(vote);
                    count = 1; // Run of 1
                }
            }
            // Both Options are always set or unset together.
            (_, _) => panic!("Shouldn't happen"),
        };
    }
    // Record the last run
    // NOTE(review): these unwraps panic on an empty votes table — presumably
    // acceptable for a maintenance batch; confirm.
    runs.push(get_run_val(
        &start_run_vote.unwrap(),
        &prev_vote.unwrap(),
        count,
    ));
    if delete {
        // Scan and delete the offenders
        let mut stmt = conn.prepare("DELETE FROM votes WHERE id >=? and id <=?")?;
        for r in &runs {
            if r.count > min {
                let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
                // Sanity check: the id range should cover exactly the run's
                // rows; assumes ids within a run are contiguous — TODO confirm.
                if (r.count as usize)!= deleted {
                    panic!("Expected: {} to be deleted, got {}", r.count, deleted);
                }
            }
        }
    } else {
        // Now we can scan for anything that > min and print them
        println!(
            "{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
            "start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
        );
        for r in &runs {
            if r.count > min {
                println!(
                    "{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
                    r.oldest_id,
                    r.newest_id,
                    r.by_whom_name,
                    r.for_what_name,
                    str_amount(r.amount),
                    r.count
                );
            }
        }
    }
    Ok(())
}
/// Prune and compact old database backups in `directory`.
///
/// Keeps every backup from the current month plus the first-of-month
/// backups of the current and previous year. When a year has all 12
/// first-of-month backups (and `skip` is false) they are compacted into a
/// single `db-backup-<year>.tar.zst` and scheduled for deletion. Files are
/// only actually removed when `delete` is true (see `print_delete`).
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
    let now: DateTime<Local> = Local::now();
    let year = now.year();
    let month = now.month();
    // Fetch a set of all of the files
    let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
    // Fetch a set of all of the file in the current month+year
    let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
    // Fetch a set of all of the file that is in previous year + first of the month
    let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
    // Fetch a set of all of the file that is in current year + first of the month
    let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
    // Calculate the initial set of files to prune.
    // BUG FIX: the keep-list references were mangled ("¤t_…" is the
    // HTML entity `&curren;` eating "&curren" from "&current"); restored
    // `&current_month_year` / `&current_first_month`.
    let mut delete_files = delete_set(
        &all_files,
        vec![&current_month_year, &previous_first_month, &current_first_month],
    );
    // Compact previous/current first-of-month sets into their years.
    if skip {
        println!("Compacting: Skipped");
    } else {
        if previous_first_month.len() == 12 {
            let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year - 1);
            print_compact(&previous_first_month, &tarfile)?;
            delete_files.extend(previous_first_month.iter().cloned());
        }
        if current_first_month.len() == 12 {
            let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
            print_compact(&current_first_month, &tarfile)?;
            delete_files.extend(current_first_month.iter().cloned());
        }
    }
    // List the files we are going to delete
    print_delete(&delete_files, delete)?;
    Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> |
/// Returns the members of `all` that appear in none of the `keep` sets —
/// i.e. the files eligible for deletion.
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
    let mut delete = all.clone();
    for hs in keep {
        // Remove in place; the old code rebuilt the whole set from
        // `difference` (and cloned every surviving element) once per
        // keep-list.
        delete.retain(|e| !hs.contains(e));
    }
    delete
}
/// Bundle every (zstd-compressed) backup in `compact` into a single
/// zstd-compressed tar archive at `filename`.
///
/// Each entry is stored *decompressed* inside the tar (the archive as a
/// whole is then re-compressed at level 21), so the tar header needs the
/// uncompressed size, which is learned via a throwaway decode pass.
/// Fails if `filename` already exists (`create_new`).
fn compact(compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
    let tarfile = OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(filename);
    // zstd encoder wrapping the tar output; auto_finish completes the
    // zstd frame when the writer is dropped.
    let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
    for f in compact.iter() {
        let mut file = File::open(f)?;
        let filename = Path::new(f).file_name().unwrap();
        // Decode into a counting sink purely to obtain the uncompressed
        // size for the tar header.
        let filesize = {
            let mut count = CountWrite::from(std::io::sink());
            copy_decode(&file, &mut count)?;
            count.count()
        };
        let mut header = Header::new_gnu();
        header.set_path(filename)?;
        header.set_size(filesize);
        header.set_cksum();
        // NOTE(review): this rewind looks vestigial — the file is reopened
        // below for the actual append; confirm before removing.
        file.seek(SeekFrom::Start(0))?;
        tar.append(
            &header,
            Decoder::new(std::fs::File::open(f)?)?
        )?;
    }
    tar.finish()?;
    Ok(())
}
/// Print the sorted list of backups about to be bundled into `tarfile`,
/// then perform the compaction.
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
    println!("Compacting: {}", tarfile);
    let mut names: Vec<&OsStr> = to_compact
        .iter()
        .map(|e| Path::new(e).file_name().unwrap())
        .collect();
    names.sort();
    for name in &names {
        println!("\t{:?}", name);
    }
    compact(to_compact, tarfile)?;
    Ok(())
}
/// Print the sorted list of files slated for removal; actually delete them
/// from disk only when `delete` is true.
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
    println!("Deleting:");
    let mut sorted: Vec<&OsString> = to_delete.iter().collect();
    sorted.sort();
    for entry in sorted {
        let path = Path::new(entry);
        println!("\t{:?}", path.file_name().unwrap());
        if delete {
            std::fs::remove_file(path)?;
        }
    }
    Ok(())
}
| {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
} | identifier_body |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => { | let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote!= prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >=? and id <=?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize)!= deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
let now: DateTime<Local> = Local::now();
let year = now.year();
let month = now.month();
// Fetch a set of all of the files
let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
// Fetch a set of all of the file in the current month+year
let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
// Fetch a set of all of the file that is in previous year + first of the month
let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
// Fetch a set of all of the file that is in current year + first of the month
let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
// Calculate the initial set of files to prune
let mut delete_files = delete_set(&all_files, vec![¤t_month_year, &previous_first_month, ¤t_first_month]);
// Compact pfm + cfm into their years
if skip {
println!("Compacting: Skipped");
} else {
if previous_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year-1);
print_compact(&previous_first_month, &tarfile)?;
delete_files.extend(previous_first_month.iter().map(|e| e.clone()));
}
if current_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
print_compact(¤t_first_month, &tarfile)?;
delete_files.extend(current_first_month.iter().map(|e| e.clone()));
}
}
// List the files we are going to delete
print_delete(&delete_files, delete)?;
Ok(())
}
/// Expand `directory` + `glob_str` into the set of matching paths.
/// Panics if the combined pattern is not a valid glob; unreadable matches
/// are silently skipped (`flatten` over the per-entry Results).
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> {
    let pattern = format!("{}{}", directory, glob_str);
    glob(&pattern)
        .unwrap()
        .flatten()
        .map(|path| path.into_os_string())
        .collect()
}
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn compact(compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete {
std::fs::remove_file(path)?;
}
}
Ok(())
} | let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap(); | random_line_split |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => {
let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap();
let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote!= prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >=? and id <=?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize)!= deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
let now: DateTime<Local> = Local::now();
let year = now.year();
let month = now.month();
// Fetch a set of all of the files
let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
// Fetch a set of all of the file in the current month+year
let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
// Fetch a set of all of the file that is in previous year + first of the month
let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
// Fetch a set of all of the file that is in current year + first of the month
let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
// Calculate the initial set of files to prune
let mut delete_files = delete_set(&all_files, vec![¤t_month_year, &previous_first_month, ¤t_first_month]);
// Compact pfm + cfm into their years
if skip {
println!("Compacting: Skipped");
} else {
if previous_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year-1);
print_compact(&previous_first_month, &tarfile)?;
delete_files.extend(previous_first_month.iter().map(|e| e.clone()));
}
if current_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
print_compact(¤t_first_month, &tarfile)?;
delete_files.extend(current_first_month.iter().map(|e| e.clone()));
}
}
// List the files we are going to delete
print_delete(&delete_files, delete)?;
Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
}
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn compact(compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete |
}
Ok(())
}
| {
std::fs::remove_file(path)?;
} | conditional_block |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
/// A single plotted data point, one per harvest tick.
#[derive(Debug, Default)]
pub struct TimedData {
    // Network receive, stored log2-scaled (0.0 when rx == 0) — see `eat_network`.
    pub rx_data: Value,
    // Network transmit; presumably the same log2 scaling as rx — TODO confirm
    // (the tx branch of `eat_network` is not fully visible here).
    pub tx_data: Value,
    // Per-CPU usage values, in harvest order (see `eat_cpu`).
    pub cpu_data: Vec<Value>,
    // Memory usage as a percentage of total (see `eat_memory_and_swap`).
    pub mem_data: Value,
    // Swap usage as a percentage of total; stays 0 when no swap is configured.
    pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
    // Timestamp of the most recently eaten harvest.
    pub current_instant: Instant,
    // When set, the UI should display data as of this instant (see `set_frozen_time`).
    pub frozen_instant: Option<Instant>,
    // Chronologically ordered timeline of sampled points; pruned by `clean_data`.
    pub timed_data_vec: Vec<(Instant, TimedData)>,
    // Latest raw harvests, kept for direct (non-timeline) display.
    pub network_harvest: network::NetworkHarvest,
    pub memory_harvest: mem::MemHarvest,
    pub swap_harvest: mem::MemHarvest,
    pub cpu_harvest: cpu::CpuHarvest,
    pub process_harvest: Vec<processes::ProcessHarvest>,
    pub disk_harvest: Vec<disks::DiskHarvest>,
    pub io_harvest: disks::IOHarvest,
    // Per-disk I/O counter pairs; presumably ((current r, w), (previous r, w)) —
    // TODO confirm against `eat_disks` (not visible in this chunk).
    pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
    // Display strings derived from the counters above.
    pub io_labels: Vec<(String, String)>,
    pub temp_harvest: Vec<temperature::TempHarvest>,
    pub battery_harvest: Vec<battery_harvester::BatteryHarvest>,
}
impl Default for DataCollection {
    /// Manual impl because `Instant` has no `Default`; everything else
    /// starts empty/zeroed.
    fn default() -> Self {
        DataCollection {
            current_instant: Instant::now(),
            frozen_instant: None,
            timed_data_vec: Vec::new(),
            network_harvest: Default::default(),
            memory_harvest: Default::default(),
            swap_harvest: Default::default(),
            cpu_harvest: Default::default(),
            process_harvest: Vec::new(),
            disk_harvest: Vec::new(),
            io_harvest: Default::default(),
            io_labels_and_prev: Vec::new(),
            io_labels: Vec::new(),
            temp_harvest: Vec::new(),
            battery_harvest: Vec::new(),
        }
    }
}
impl DataCollection {
/// Drop all collected data, returning every harvest field to its default
/// state. `current_instant` and `frozen_instant` are left untouched.
pub fn reset(&mut self) {
    self.timed_data_vec = Vec::default();
    self.network_harvest = network::NetworkHarvest::default();
    self.memory_harvest = mem::MemHarvest::default();
    self.swap_harvest = mem::MemHarvest::default();
    self.cpu_harvest = cpu::CpuHarvest::default();
    self.process_harvest = Vec::default();
    self.disk_harvest = Vec::default();
    self.io_harvest = disks::IOHarvest::default();
    self.io_labels_and_prev = Vec::default();
    // BUG FIX: `io_labels` was the one collection field `reset` forgot to
    // clear (compare `Default::default()`), leaking stale disk labels
    // across resets.
    self.io_labels = Vec::default();
    self.temp_harvest = Vec::default();
    self.battery_harvest = Vec::default();
}
/// Record the current data timestamp so callers can keep displaying a
/// fixed ("frozen") snapshot while collection continues in the background.
pub fn set_frozen_time(&mut self) {
    self.frozen_instant = Some(self.current_instant);
}
/// Evict timeline entries older than `max_time_millis` from the front of
/// `timed_data_vec`. Entries are stored in chronological order, so only a
/// leading prefix can be stale — scanning stops at the first fresh entry.
pub fn clean_data(&mut self, max_time_millis: u64) {
    trace!("Cleaning data.");
    let current_time = Instant::now();
    let cutoff = max_time_millis as u128;
    let stale = self
        .timed_data_vec
        .iter()
        .take_while(|(instant, _)| current_time.duration_since(*instant).as_millis() >= cutoff)
        .count();
    self.timed_data_vec.drain(0..stale);
}
/// Ingest one harvested `Data` snapshot: fold each present sub-harvest
/// into a fresh `TimedData` point (and into the cached "latest" harvest
/// fields), then advance `current_instant` and append the point to the
/// timeline. Absent sub-harvests simply leave their fields at default.
pub fn eat_data(&mut self, harvested_data: &Data) {
    trace!("Eating data now...");
    let harvested_time = harvested_data.last_collection_time;
    trace!("Harvested time: {:?}", harvested_time);
    trace!("New current instant: {:?}", self.current_instant);
    let mut new_entry = TimedData::default();
    // Network
    if let Some(network) = &harvested_data.network {
        self.eat_network(network, &mut new_entry);
    }
    // Memory and Swap
    // Note: memory is only recorded when swap data is also present.
    if let Some(memory) = &harvested_data.memory {
        if let Some(swap) = &harvested_data.swap {
            self.eat_memory_and_swap(memory, swap, &mut new_entry);
        }
    }
    // CPU
    if let Some(cpu) = &harvested_data.cpu {
        self.eat_cpu(cpu, &mut new_entry);
    }
    // Temp
    if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
        self.eat_temp(temperature_sensors);
    }
    // Disks
    // Note: disks are only recorded when I/O data is also present.
    if let Some(disks) = &harvested_data.disks {
        if let Some(io) = &harvested_data.io {
            self.eat_disks(disks, io, harvested_time);
        }
    }
    // Processes
    if let Some(list_of_processes) = &harvested_data.list_of_processes {
        self.eat_proc(list_of_processes);
    }
    // Battery
    if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
        self.eat_battery(list_of_batteries);
    }
    // And we're done eating. Update time and push the new entry!
    self.current_instant = harvested_time;
    self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 | else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
| {
(network.tx as f64).log2()
} | conditional_block |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct TimedData {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>, | fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
}
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 {
(network.tx as f64).log2()
} else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
} | }
impl Default for DataCollection { | random_line_split |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct | {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>,
}
impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
}
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 {
(network.tx as f64).log2()
} else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
| TimedData | identifier_name |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct TimedData {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>,
}
impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) |
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 {
(network.tx as f64).log2()
} else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
| {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
} | identifier_body |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1!= X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn | <X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd,.. } = extended_gcd__inline(a, b);
gcd
}
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm,.. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _),.. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem!= a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper { None } else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just'max' and its direct dependencies
// (which are of the form'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are
// not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?},!= {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?},!= {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b!= X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
}
| gcd | identifier_name |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1!= X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn gcd<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd,.. } = extended_gcd__inline(a, b);
gcd
}
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm,.. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _),.. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem!= a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper | else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just'max' and its direct dependencies
// (which are of the form'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are
// not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?},!= {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?},!= {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b!= X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
}
| { None } | conditional_block |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1!= X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn gcd<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
|
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm,.. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _),.. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem!= a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper { None } else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just'max' and its direct dependencies
// (which are of the form'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are
// not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?},!= {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?},!= {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b!= X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
}
| {
let GcdData { gcd, .. } = extended_gcd__inline(a, b);
gcd
} | identifier_body |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1!= X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn gcd<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd,.. } = extended_gcd__inline(a, b);
gcd
}
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm,.. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _),.. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem!= a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper { None } else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just'max' and its direct dependencies
// (which are of the form'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are | // not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?},!= {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?},!= {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b!= X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
} | random_line_split |
|
main.rs | let [a, b, c] = self.0;
if a == b && b == c {
write!(f, "mono")
}
else {
let mut res = ['?'; 3];
res[a] = 'A';
res[b] = 'B';
res[c] = 'C';
let [l, r, c] = res;
write!(f, "{l}{c}{r}")
}
}
}
impl Default for ChannelMap {
fn default() -> Self {
ChannelMap([0, 1, 2]) // ACB
}
}
const MONO_CHANNEL_MAP: ChannelMap = ChannelMap([0, 0, 0]);
/* How to mix YM audio channels */
#[derive(Debug, Clone, Copy)]
enum ChannelMode {
/// Center channel is mixed-in with stereo channels.
MixedStereo(f32),
/// All channels are mixed-in together into a single audio channel.
Mono,
/// Left and right channel are played in stereo, redirect a center channel into a specific audio channel.
Channel(u32)
}
impl Default for ChannelMode {
fn default() -> Self {
ChannelMode::MixedStereo(0.8)
}
}
impl fmt::Display for ChannelMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ChannelMode::MixedStereo(ampl) => write!(f, "{ampl}"),
ChannelMode::Mono => write!(f, "m"),
ChannelMode::Channel(n) => write!(f, "{n}"),
}
}
}
fn print_time(secs: u32) |
fn print_current(last_secs: &mut u32, cur_secs: f32, total_secs: f32) {
let secs = cur_secs.trunc() as u32;
if *last_secs == secs {
return;
}
*last_secs = secs;
print!("\r");
print_time(secs);
print!(" -> ");
print_time((total_secs - cur_secs).trunc() as u32);
stdout().flush().unwrap();
}
/****************************************************************************/
/* PLAYER */
/****************************************************************************/
struct PlayEnv {
ym_file: YmSong,
ampl_level: f32,
repeat: u32,
channel_map: ChannelMap,
track: bool,
}
fn play_with_blep<A, B, SD, S>(
PlayEnv { mut ym_file, ampl_level, repeat, channel_map, track }: PlayEnv,
mut audio: AudioHandle<S>,
bandlim: &mut B,
render_audio: &dyn Fn(&mut BlepAmpFilter<&mut B>, &mut Vec<S>)
)
where A: AmpLevels<SD>,
B: BandLimitedExt<SD, S> +?Sized,
SD: SampleDelta + FromSample<f32> + MulNorm,
S: AudioSample + cpal::SizedSample
{
log::debug!("Channels: {channel_map} {:?}", channel_map.0);
/* Spectrusty's emulated AY is clocked at a half frequency of a host CPU clock,
we need to adjust cycles counter */
let host_frame_cycles = (ym_file.frame_cycles() * HOST_CLOCK_RATIO as f32) as i32;
let host_frequency = ym_file.chipset_frequency as f64 * HOST_CLOCK_RATIO as f64;
log::trace!("AY host frequency: {} Hz, frame: {} cycles", host_frequency, host_frame_cycles);
/* create a BLEP amplitude filter wrapper */
let mut bandlim = BlepAmpFilter::new(SD::from_sample(ampl_level), bandlim);
/* ensure BLEP has enough space to fit a single audio frame
(there is no margin - our frames will have constant size). */
bandlim.ensure_frame_time(audio.sample_rate, host_frequency, host_frame_cycles, 0);
/* number of audio output channels */
let channels = audio.channels as usize;
/* create an emulator instance */
let mut ay = Ay3_891xAudio::default();
/* buffered frame changes to AY-3-891x registers */
let mut changes = Vec::new();
/* play counter */
let mut counter = repeat;
/* total seconds */
let total_secs = ym_file.frames.len() as f32 / ym_file.frame_frequency as f32;
let mut last_secs: u32 = u32::MAX;
loop {
if track {
let cur_secs = ym_file.cursor() as f32 / ym_file.frame_frequency as f32;
print_current(&mut last_secs, cur_secs, total_secs);
}
/* produce YM chipset changes */
let finished = ym_file.produce_next_ay_frame(|ts, reg, val| {
changes.push(
AyRegChange::new(
(ts * HOST_CLOCK_RATIO as f32).trunc() as i32,
AyRegister::from(reg),
val))
});
/* render audio into BLEP */
ay.render_audio::<A,_,_>(changes.drain(..),
&mut bandlim,
host_frame_cycles,
host_frame_cycles,
channel_map.0);
/* close frame */
let frame_sample_count = bandlim.end_frame(host_frame_cycles);
/* render BLEP frame into the sample buffer */
audio.producer.render_frame(|ref mut buf| {
/* ensure the BLEP frame fits into the sample buffer */
buf.resize(frame_sample_count * channels, S::silence());
render_audio(&mut bandlim, buf);
});
/* send a rendered sample buffer to the consumer */
audio.producer.send_frame().unwrap();
if finished {
log::info!("Finished.");
if repeat!= 0 {
counter -= 1;
if counter == 0 {
break;
}
}
}
}
/* let the audio thread finish playing */
for _ in 0..50 {
audio.producer.render_frame(|ref mut buf| {
buf.fill(S::silence());
});
audio.producer.send_frame().unwrap();
}
audio.close();
}
fn play_with_amps<A, SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where A: AmpLevels<SD>,
SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm +'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample
{
let Args { volume, repeat, channels: channel_map, mode, track, hpass, lpass,.. } = args;
log::debug!("Repeat: {repeat}, volume: {volume}%");
let ampl_level = amplitude_level(args.volume);
log::trace!("Amplitude filter: {ampl_level}");
let mut env = PlayEnv { ym_file, ampl_level, repeat, channel_map, track };
let channels = audio.channels as usize;
match mode {
ChannelMode::MixedStereo(mono_filter) if channels >= 2 => {
/* a multi-channel to stereo mixer */
let mut blep = BlepStereo::new(mono_filter.into_sample(),
/* a stereo band-limited pulse buffer */
BandLimitedAny::new(2, lpass, hpass));
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
ChannelMode::Channel(channel) if channels >= channel as usize => {
/* a multi-channel band-limited pulse buffer */
let third_chan = (channel - 1) as usize;
let mut blep = BandLimitedAny::new(3, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1, third_chan]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
_ => {
/* a monophonic band-limited pulse buffer */
let mut blep = BandLimitedAny::new(1, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
env.channel_map = MONO_CHANNEL_MAP;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_fill_interleaved(buf, channels, 0);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
}
}
fn play<SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm +'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample,
AyFuseAmps<SD>: AmpLevels<SD>,
AyAmps<SD>: AmpLevels<SD>
{
if args.fuse {
log::debug!("YM amplitide levels: fuse (measured)");
play_with_amps::<AyFuseAmps<_>, _, _>(audio, ym_file, args)
}
else {
log::debug!("YM amplitide levels: default (specs)");
play_with_amps::<AyAmps<_>, _, _>(audio, ym_file, args)
}
}
/****************************************************************************/
/* MAIN */
/****************************************************************************/
#[derive(Default, Debug, Clone, Copy, PartialEq)]
struct StreamConfigHint {
channels: Option<cpal::ChannelCount>,
sample_rate: Option<cpal::SampleRate>,
sample_format: Option<cpal::SampleFormat>
}
impl fmt::Display for StreamConfigHint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self == &StreamConfigHint::default() {
return f.write_str("*");
}
if let Some(format) = self.sample_format {
write!(f, "{:?}", format)?;
}
if self.channels.is_some() && self.sample_rate.is_some() {
f.write_str(",")?;
}
if let Some(channels) = self.channels {
write!(f, "{}", channels)?;
}
if let Some(rate) = self.sample_rate {
write!(f, "@{}", rate.0)?;
}
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// A file path to an YM song.
ym_file: Option<String>,
/// Audio mixer volume: 0 - 100.
#[arg(short, long, default_value_t = 50, value_parser = volume_in_range)]
volume: u8,
/// Play counter, 0 to play forever.
#[arg(short, long, default_value_t = 0)]
repeat: u32,
/// YM channels map: Left Center Right.
#[arg(short, long, default_value_t = ChannelMap::default(), value_parser = parse_channels)]
channels: ChannelMap,
/// Channel mode: s|m|0.s|N.
///
/// "s" - stereo mode with a center channel mixed with an amplitude of 0.8
///
/// "m" - monophonic mode, played on all audio channels
///
/// "0.s" - stereo mode, center channel amplitude: 0.s
///
/// "N" - multi-channel mode, redirect center channel to Nth (3+) audio channel
#[arg(short, long, default_value_t = ChannelMode::default(), value_parser = parse_channel_mode)]
mode: ChannelMode,
/// Switch to alternative YM amplitude levels (measured vs specs).
#[arg(short, long, default_value_t = false)]
fuse: bool,
/// Enable low-pass audio band filter.
#[arg(long, default_value_t = false)]
lpass: bool,
/// Enable high-pass audio band filter.
#[arg(long, default_value_t = false)]
hpass: bool,
/// Desired audio output parameters: ST,CHANS@RATE.
///
/// ST is a sample type, e.g.: U8, I16, U32, F32.
///
/// CHANS is the number of channels and RATE is the sample rate.
#[arg(short, long, default_value_t = StreamConfigHint::default(), value_parser = parse_stream_config)]
audio: StreamConfigHint,
/// Track the current song time.
#[arg(short, long, default_value_t = false)]
track: bool,
/// Log verbosity level.
///
/// -d for INFO, -dd for DEBUG, -ddd for TRACE
#[arg(short, long, action = clap::ArgAction::Count)]
debug: u8
}
fn volume_in_range(s: &str) -> Result<u8, String> {
let volume: usize = s
.parse()
.map_err(|_| format!("`{s}` isn't a volume"))?;
if (0..=NORMAL_AMPLITUDE as usize).contains(&volume) {
Ok(volume as u8)
} else {
Err(format!("volume not in range 0 - {NORMAL_AMPLITUDE}"))
}
}
fn parse_channel_mode(s: &str) -> Result<ChannelMode, String> {
Ok(match s {
"s"|"S" => ChannelMode::MixedStereo(0.8),
"m"|"M" => ChannelMode::Mono,
s if s.starts_with("0.") => {
let amp: f32 = s.parse().map_err(|_| format!("`{s}` isn't a stereo mixer amplitude"))?;
ChannelMode::MixedStereo(amp)
}
s => {
let channel: u32 = s.parse().map_err(|_| format!("`{s}` isn't a mixer mode channel"))?;
if channel < 3 {
return Err("mixer mode channel must be >= 3".into());
}
ChannelMode::Channel(channel)
}
})
}
fn parse_channels(s: &str) -> Result<ChannelMap, String> {
const ERROR_MSG: &str = "channel mapping should be a permutation of ABC characters";
if s.len()!= 3 {
return Err(ERROR_MSG.into());
}
let mut channels = [usize::MAX; 3];
// [A, B, C], where N -> 0: left, 1: right, 2: center
for (ch, chan) in s.chars().zip([0, 2, 1].into_iter()) {
let pos = match ch.to_ascii_uppercase() {
'A' => 0,
'B' => 1,
'C' => 2,
_ => return Err(ERROR_MSG.into())
};
if channels[pos]!= usize::MAX {
return Err(ERROR_MSG.into());
}
channels[pos] = chan;
}
Ok(ChannelMap(channels))
}
fn parse_stream_config(mut s: &str) -> Result<StreamConfigHint, String> {
let mut config = StreamConfigHint::default();
if s == "*" {
return Ok(config);
}
const FORMATS: &[([&str;2], cpal::SampleFormat)] = &[
(["i8", "I8"], cpal::SampleFormat::I8),
(["u8", "U8"], cpal::SampleFormat::U8),
(["i16", "I16"], cpal::SampleFormat::I16),
(["u16", "U16"], cpal::SampleFormat::U16),
(["i32", "I32"], cpal::SampleFormat::I32),
(["u32", "U32"], cpal::SampleFormat::U32),
(["f32", "F32"], cpal::SampleFormat::F32),
(["i64", "I64"], cpal::SampleFormat::I64),
(["u64", "U64"], cpal::SampleFormat::U64),
(["f64", "F64"], cpal::SampleFormat::F64)];
for ([lc, uc], format) in FORMATS.into_iter() {
if s.starts_with(lc) || s.starts_with(uc) {
config.sample_format = Some(*format);
(_, s) = s.split_at(lc.len());
break;
}
}
if s.starts_with(",") {
(_, s) = s.split_at(1);
}
let chan = match s.split_once("@") {
Some((chan, rate)) => {
if!rate.is_empty() {
config.sample_rate = Some(cpal::SampleRate(u32::from_str_radix(rate, 10)
.map_err(|_| "expected sample rate")?));
}
chan
},
None => s
};
if!chan.is_empty() {
config.channels = Some(u16::from_str_radix(chan, 10)
.map_err(|_| "expected number of channels")?);
}
Ok(config)
}
fn find_best_audio_config(device: &cpal::Device, request: StreamConfigHint) -> Result<cpal::SupportedStreamConfig, Box<dyn std::error::Error>>
{
log::trace!("Audio device: {}", device.name().unwrap_or_else(|e| e.to_string()));
let default_config = device.default_output_config()?;
if request == StreamConfigHint::default() {
return Ok(default_config);
}
let channels = request.channels.unwrap_or(default_config.channels());
for config in device.supported_output_configs()? {
if config.channels()!= channels {
continue;
}
if let Some(sample_format) = request.sample_format {
if config.sample_format()!= sample_format {
continue;
}
}
else if config.sample_format()!= default_config.sample_format() {
continue;
}
let sample_rate = match request.sample_rate {
Some(sample_rate) => if!(config.min_sample_rate()..=config.max_sample_rate()).contains(&sample_rate) {
continue;
}
else {
sample_rate
}
None => default_config.sample_rate()
};
return Ok(config.with_sample_rate(sample_rate));
}
Err("Could not find the audio configuration matching given parameters")?
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
simple_logger::init_with_level(match args.debug {
0 => log::Level::Warn,
1 => log::Level::Info,
2 => log::Level::Debug,
_ => log::Level::Trace
})?;
let ym_file = match args.ym_file {
Some(ref ym_path) => {
log::info!("Loading YM file: {}", ym_path);
ym_file_parser::parse_file(ym_path)?
}
None => YmSong::parse(BUZZ_YM)?
};
log::info!(r#"{} "{}" by {}"#,
ym_file.version,
ym_file.title.trim(),
ym_file.author.trim());
log::info!(r#"Duration: {:?} {}"#,
ym_file.song_duration(),
ym_file.comments.trim());
log::debug!("Chip: {} Hz, frame: {} Hz, {} cycles each",
ym_file.clock_frequency(),
ym_file.frame_frequency,
ym_file.frame_cycles());
log::debug!("Frames total: {}, loop to: {}, {:?}",
ym_file.frames.len(),
ym_file.loop_frame,
ym_file.song_attrs);
if log::log_enabled!(log::Level::Debug) &&!ym_file.dd_samples.is_empty() {
let mut sample_lens = Vec::with_capacity(ym_file.dd_samples_ends.len());
ym_file.dd_samples_ends.iter().try_fold(0,
| | {
let hours = secs / 3600;
let minutes = (secs % 3600) / 60;
let secs = secs % 60;
if hours != 0 {
print!("{hours}:{minutes:02}:{secs:02}");
}
else {
print!("{minutes:02}:{secs:02}");
}
} | identifier_body |
main.rs | let [a, b, c] = self.0;
if a == b && b == c {
write!(f, "mono")
}
else {
let mut res = ['?'; 3];
res[a] = 'A';
res[b] = 'B';
res[c] = 'C';
let [l, r, c] = res;
write!(f, "{l}{c}{r}")
}
}
}
impl Default for ChannelMap {
fn default() -> Self {
ChannelMap([0, 1, 2]) // ACB
}
}
const MONO_CHANNEL_MAP: ChannelMap = ChannelMap([0, 0, 0]);
/* How to mix YM audio channels */
#[derive(Debug, Clone, Copy)]
enum ChannelMode {
/// Center channel is mixed-in with stereo channels.
MixedStereo(f32),
/// All channels are mixed-in together into a single audio channel.
Mono,
/// Left and right channel are played in stereo, redirect a center channel into a specific audio channel.
Channel(u32)
}
impl Default for ChannelMode {
fn default() -> Self {
ChannelMode::MixedStereo(0.8)
}
}
impl fmt::Display for ChannelMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ChannelMode::MixedStereo(ampl) => write!(f, "{ampl}"),
ChannelMode::Mono => write!(f, "m"),
ChannelMode::Channel(n) => write!(f, "{n}"),
}
}
}
fn print_time(secs: u32) {
let hours = secs / 3600;
let minutes = (secs % 3600) / 60;
let secs = secs % 60;
if hours!= 0 {
print!("{hours}:{minutes:02}:{secs:02}");
}
else {
print!("{minutes:02}:{secs:02}");
}
}
fn print_current(last_secs: &mut u32, cur_secs: f32, total_secs: f32) {
let secs = cur_secs.trunc() as u32;
if *last_secs == secs {
return;
}
*last_secs = secs;
print!("\r");
print_time(secs);
print!(" -> ");
print_time((total_secs - cur_secs).trunc() as u32);
stdout().flush().unwrap();
}
/****************************************************************************/
/* PLAYER */
/****************************************************************************/
struct PlayEnv {
ym_file: YmSong,
ampl_level: f32,
repeat: u32,
channel_map: ChannelMap,
track: bool,
}
fn play_with_blep<A, B, SD, S>(
PlayEnv { mut ym_file, ampl_level, repeat, channel_map, track }: PlayEnv,
mut audio: AudioHandle<S>,
bandlim: &mut B,
render_audio: &dyn Fn(&mut BlepAmpFilter<&mut B>, &mut Vec<S>)
)
where A: AmpLevels<SD>,
B: BandLimitedExt<SD, S> +?Sized,
SD: SampleDelta + FromSample<f32> + MulNorm,
S: AudioSample + cpal::SizedSample
{
log::debug!("Channels: {channel_map} {:?}", channel_map.0);
/* Spectrusty's emulated AY is clocked at a half frequency of a host CPU clock,
we need to adjust cycles counter */
let host_frame_cycles = (ym_file.frame_cycles() * HOST_CLOCK_RATIO as f32) as i32;
let host_frequency = ym_file.chipset_frequency as f64 * HOST_CLOCK_RATIO as f64;
log::trace!("AY host frequency: {} Hz, frame: {} cycles", host_frequency, host_frame_cycles);
/* create a BLEP amplitude filter wrapper */
let mut bandlim = BlepAmpFilter::new(SD::from_sample(ampl_level), bandlim);
/* ensure BLEP has enough space to fit a single audio frame
(there is no margin - our frames will have constant size). */
bandlim.ensure_frame_time(audio.sample_rate, host_frequency, host_frame_cycles, 0);
/* number of audio output channels */
let channels = audio.channels as usize;
/* create an emulator instance */
let mut ay = Ay3_891xAudio::default();
/* buffered frame changes to AY-3-891x registers */
let mut changes = Vec::new();
/* play counter */
let mut counter = repeat;
/* total seconds */
let total_secs = ym_file.frames.len() as f32 / ym_file.frame_frequency as f32;
let mut last_secs: u32 = u32::MAX;
loop {
if track {
let cur_secs = ym_file.cursor() as f32 / ym_file.frame_frequency as f32;
print_current(&mut last_secs, cur_secs, total_secs);
}
/* produce YM chipset changes */
let finished = ym_file.produce_next_ay_frame(|ts, reg, val| {
changes.push(
AyRegChange::new(
(ts * HOST_CLOCK_RATIO as f32).trunc() as i32,
AyRegister::from(reg),
val))
});
/* render audio into BLEP */
ay.render_audio::<A,_,_>(changes.drain(..),
&mut bandlim,
host_frame_cycles,
host_frame_cycles,
channel_map.0);
/* close frame */
let frame_sample_count = bandlim.end_frame(host_frame_cycles);
/* render BLEP frame into the sample buffer */
audio.producer.render_frame(|ref mut buf| {
/* ensure the BLEP frame fits into the sample buffer */
buf.resize(frame_sample_count * channels, S::silence());
render_audio(&mut bandlim, buf);
});
/* send a rendered sample buffer to the consumer */
audio.producer.send_frame().unwrap();
if finished {
log::info!("Finished.");
if repeat!= 0 {
counter -= 1;
if counter == 0 {
break;
}
}
}
}
/* let the audio thread finish playing */
for _ in 0..50 {
audio.producer.render_frame(|ref mut buf| {
buf.fill(S::silence());
});
audio.producer.send_frame().unwrap();
}
audio.close();
}
fn play_with_amps<A, SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where A: AmpLevels<SD>,
SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm +'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample
{
let Args { volume, repeat, channels: channel_map, mode, track, hpass, lpass,.. } = args;
log::debug!("Repeat: {repeat}, volume: {volume}%");
let ampl_level = amplitude_level(args.volume);
log::trace!("Amplitude filter: {ampl_level}");
let mut env = PlayEnv { ym_file, ampl_level, repeat, channel_map, track };
let channels = audio.channels as usize;
match mode {
ChannelMode::MixedStereo(mono_filter) if channels >= 2 => {
/* a multi-channel to stereo mixer */
let mut blep = BlepStereo::new(mono_filter.into_sample(),
/* a stereo band-limited pulse buffer */
BandLimitedAny::new(2, lpass, hpass));
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
ChannelMode::Channel(channel) if channels >= channel as usize => {
/* a multi-channel band-limited pulse buffer */
let third_chan = (channel - 1) as usize;
let mut blep = BandLimitedAny::new(3, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1, third_chan]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
_ => {
/* a monophonic band-limited pulse buffer */
let mut blep = BandLimitedAny::new(1, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
env.channel_map = MONO_CHANNEL_MAP;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_fill_interleaved(buf, channels, 0);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
}
}
fn play<SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm +'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample,
AyFuseAmps<SD>: AmpLevels<SD>,
AyAmps<SD>: AmpLevels<SD>
{
if args.fuse {
log::debug!("YM amplitide levels: fuse (measured)");
play_with_amps::<AyFuseAmps<_>, _, _>(audio, ym_file, args)
}
else {
log::debug!("YM amplitide levels: default (specs)");
play_with_amps::<AyAmps<_>, _, _>(audio, ym_file, args)
}
}
/****************************************************************************/
/* MAIN */
/****************************************************************************/
#[derive(Default, Debug, Clone, Copy, PartialEq)]
struct StreamConfigHint {
channels: Option<cpal::ChannelCount>,
sample_rate: Option<cpal::SampleRate>,
sample_format: Option<cpal::SampleFormat>
}
impl fmt::Display for StreamConfigHint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self == &StreamConfigHint::default() {
return f.write_str("*");
}
if let Some(format) = self.sample_format {
write!(f, "{:?}", format)?;
}
if self.channels.is_some() && self.sample_rate.is_some() {
f.write_str(",")?;
}
if let Some(channels) = self.channels {
write!(f, "{}", channels)?;
}
if let Some(rate) = self.sample_rate {
write!(f, "@{}", rate.0)?;
}
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// A file path to an YM song.
ym_file: Option<String>,
/// Audio mixer volume: 0 - 100.
#[arg(short, long, default_value_t = 50, value_parser = volume_in_range)]
volume: u8,
/// Play counter, 0 to play forever.
#[arg(short, long, default_value_t = 0)]
repeat: u32,
/// YM channels map: Left Center Right.
#[arg(short, long, default_value_t = ChannelMap::default(), value_parser = parse_channels)]
channels: ChannelMap,
/// Channel mode: s|m|0.s|N.
///
/// "s" - stereo mode with a center channel mixed with an amplitude of 0.8
///
/// "m" - monophonic mode, played on all audio channels
///
/// "0.s" - stereo mode, center channel amplitude: 0.s
///
/// "N" - multi-channel mode, redirect center channel to Nth (3+) audio channel
#[arg(short, long, default_value_t = ChannelMode::default(), value_parser = parse_channel_mode)]
mode: ChannelMode,
/// Switch to alternative YM amplitude levels (measured vs specs).
#[arg(short, long, default_value_t = false)]
fuse: bool,
/// Enable low-pass audio band filter.
#[arg(long, default_value_t = false)]
lpass: bool,
/// Enable high-pass audio band filter.
#[arg(long, default_value_t = false)]
hpass: bool,
/// Desired audio output parameters: ST,CHANS@RATE.
///
/// ST is a sample type, e.g.: U8, I16, U32, F32.
///
/// CHANS is the number of channels and RATE is the sample rate.
#[arg(short, long, default_value_t = StreamConfigHint::default(), value_parser = parse_stream_config)]
audio: StreamConfigHint,
/// Track the current song time.
#[arg(short, long, default_value_t = false)]
track: bool,
/// Log verbosity level.
///
/// -d for INFO, -dd for DEBUG, -ddd for TRACE
#[arg(short, long, action = clap::ArgAction::Count)]
debug: u8
}
fn volume_in_range(s: &str) -> Result<u8, String> {
let volume: usize = s
.parse()
.map_err(|_| format!("`{s}` isn't a volume"))?;
if (0..=NORMAL_AMPLITUDE as usize).contains(&volume) {
Ok(volume as u8)
} else {
Err(format!("volume not in range 0 - {NORMAL_AMPLITUDE}"))
}
}
fn parse_channel_mode(s: &str) -> Result<ChannelMode, String> {
Ok(match s {
"s"|"S" => ChannelMode::MixedStereo(0.8),
"m"|"M" => ChannelMode::Mono,
s if s.starts_with("0.") => {
let amp: f32 = s.parse().map_err(|_| format!("`{s}` isn't a stereo mixer amplitude"))?;
ChannelMode::MixedStereo(amp)
}
s => {
let channel: u32 = s.parse().map_err(|_| format!("`{s}` isn't a mixer mode channel"))?;
if channel < 3 {
return Err("mixer mode channel must be >= 3".into());
}
ChannelMode::Channel(channel)
}
})
}
fn parse_channels(s: &str) -> Result<ChannelMap, String> {
const ERROR_MSG: &str = "channel mapping should be a permutation of ABC characters";
if s.len()!= 3 {
return Err(ERROR_MSG.into());
}
let mut channels = [usize::MAX; 3];
// [A, B, C], where N -> 0: left, 1: right, 2: center
for (ch, chan) in s.chars().zip([0, 2, 1].into_iter()) {
let pos = match ch.to_ascii_uppercase() {
'A' => 0,
'B' => 1,
'C' => 2,
_ => return Err(ERROR_MSG.into())
};
if channels[pos]!= usize::MAX {
return Err(ERROR_MSG.into());
}
channels[pos] = chan;
}
Ok(ChannelMap(channels))
}
fn | (mut s: &str) -> Result<StreamConfigHint, String> {
let mut config = StreamConfigHint::default();
if s == "*" {
return Ok(config);
}
const FORMATS: &[([&str;2], cpal::SampleFormat)] = &[
(["i8", "I8"], cpal::SampleFormat::I8),
(["u8", "U8"], cpal::SampleFormat::U8),
(["i16", "I16"], cpal::SampleFormat::I16),
(["u16", "U16"], cpal::SampleFormat::U16),
(["i32", "I32"], cpal::SampleFormat::I32),
(["u32", "U32"], cpal::SampleFormat::U32),
(["f32", "F32"], cpal::SampleFormat::F32),
(["i64", "I64"], cpal::SampleFormat::I64),
(["u64", "U64"], cpal::SampleFormat::U64),
(["f64", "F64"], cpal::SampleFormat::F64)];
for ([lc, uc], format) in FORMATS.into_iter() {
if s.starts_with(lc) || s.starts_with(uc) {
config.sample_format = Some(*format);
(_, s) = s.split_at(lc.len());
break;
}
}
if s.starts_with(",") {
(_, s) = s.split_at(1);
}
let chan = match s.split_once("@") {
Some((chan, rate)) => {
if!rate.is_empty() {
config.sample_rate = Some(cpal::SampleRate(u32::from_str_radix(rate, 10)
.map_err(|_| "expected sample rate")?));
}
chan
},
None => s
};
if!chan.is_empty() {
config.channels = Some(u16::from_str_radix(chan, 10)
.map_err(|_| "expected number of channels")?);
}
Ok(config)
}
fn find_best_audio_config(device: &cpal::Device, request: StreamConfigHint) -> Result<cpal::SupportedStreamConfig, Box<dyn std::error::Error>>
{
log::trace!("Audio device: {}", device.name().unwrap_or_else(|e| e.to_string()));
let default_config = device.default_output_config()?;
if request == StreamConfigHint::default() {
return Ok(default_config);
}
let channels = request.channels.unwrap_or(default_config.channels());
for config in device.supported_output_configs()? {
if config.channels()!= channels {
continue;
}
if let Some(sample_format) = request.sample_format {
if config.sample_format()!= sample_format {
continue;
}
}
else if config.sample_format()!= default_config.sample_format() {
continue;
}
let sample_rate = match request.sample_rate {
Some(sample_rate) => if!(config.min_sample_rate()..=config.max_sample_rate()).contains(&sample_rate) {
continue;
}
else {
sample_rate
}
None => default_config.sample_rate()
};
return Ok(config.with_sample_rate(sample_rate));
}
Err("Could not find the audio configuration matching given parameters")?
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
simple_logger::init_with_level(match args.debug {
0 => log::Level::Warn,
1 => log::Level::Info,
2 => log::Level::Debug,
_ => log::Level::Trace
})?;
let ym_file = match args.ym_file {
Some(ref ym_path) => {
log::info!("Loading YM file: {}", ym_path);
ym_file_parser::parse_file(ym_path)?
}
None => YmSong::parse(BUZZ_YM)?
};
log::info!(r#"{} "{}" by {}"#,
ym_file.version,
ym_file.title.trim(),
ym_file.author.trim());
log::info!(r#"Duration: {:?} {}"#,
ym_file.song_duration(),
ym_file.comments.trim());
log::debug!("Chip: {} Hz, frame: {} Hz, {} cycles each",
ym_file.clock_frequency(),
ym_file.frame_frequency,
ym_file.frame_cycles());
log::debug!("Frames total: {}, loop to: {}, {:?}",
ym_file.frames.len(),
ym_file.loop_frame,
ym_file.song_attrs);
if log::log_enabled!(log::Level::Debug) &&!ym_file.dd_samples.is_empty() {
let mut sample_lens = Vec::with_capacity(ym_file.dd_samples_ends.len());
ym_file.dd_samples_ends.iter().try_fold(0,
| | parse_stream_config | identifier_name |
main.rs | let [a, b, c] = self.0;
if a == b && b == c {
write!(f, "mono")
}
else {
let mut res = ['?'; 3];
res[a] = 'A';
res[b] = 'B';
res[c] = 'C';
let [l, r, c] = res;
write!(f, "{l}{c}{r}")
}
}
}
impl Default for ChannelMap {
fn default() -> Self {
ChannelMap([0, 1, 2]) // ACB
}
}
const MONO_CHANNEL_MAP: ChannelMap = ChannelMap([0, 0, 0]);
/* How to mix YM audio channels */
#[derive(Debug, Clone, Copy)]
enum ChannelMode {
/// Center channel is mixed-in with stereo channels.
MixedStereo(f32),
/// All channels are mixed-in together into a single audio channel.
Mono,
/// Left and right channel are played in stereo, redirect a center channel into a specific audio channel.
Channel(u32)
}
impl Default for ChannelMode {
fn default() -> Self {
ChannelMode::MixedStereo(0.8)
}
}
impl fmt::Display for ChannelMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ChannelMode::MixedStereo(ampl) => write!(f, "{ampl}"),
ChannelMode::Mono => write!(f, "m"),
ChannelMode::Channel(n) => write!(f, "{n}"),
}
}
}
fn print_time(secs: u32) {
let hours = secs / 3600;
let minutes = (secs % 3600) / 60;
let secs = secs % 60;
if hours!= 0 {
print!("{hours}:{minutes:02}:{secs:02}");
}
else {
print!("{minutes:02}:{secs:02}");
}
}
fn print_current(last_secs: &mut u32, cur_secs: f32, total_secs: f32) {
let secs = cur_secs.trunc() as u32;
if *last_secs == secs {
return;
}
*last_secs = secs;
print!("\r");
print_time(secs);
print!(" -> ");
print_time((total_secs - cur_secs).trunc() as u32);
stdout().flush().unwrap();
}
/****************************************************************************/
/* PLAYER */
/****************************************************************************/
struct PlayEnv {
ym_file: YmSong,
ampl_level: f32,
repeat: u32,
channel_map: ChannelMap,
track: bool,
}
fn play_with_blep<A, B, SD, S>(
PlayEnv { mut ym_file, ampl_level, repeat, channel_map, track }: PlayEnv,
mut audio: AudioHandle<S>,
bandlim: &mut B,
render_audio: &dyn Fn(&mut BlepAmpFilter<&mut B>, &mut Vec<S>)
)
where A: AmpLevels<SD>,
B: BandLimitedExt<SD, S> +?Sized,
SD: SampleDelta + FromSample<f32> + MulNorm,
S: AudioSample + cpal::SizedSample
{
log::debug!("Channels: {channel_map} {:?}", channel_map.0);
/* Spectrusty's emulated AY is clocked at a half frequency of a host CPU clock,
we need to adjust cycles counter */
let host_frame_cycles = (ym_file.frame_cycles() * HOST_CLOCK_RATIO as f32) as i32;
let host_frequency = ym_file.chipset_frequency as f64 * HOST_CLOCK_RATIO as f64;
log::trace!("AY host frequency: {} Hz, frame: {} cycles", host_frequency, host_frame_cycles);
/* create a BLEP amplitude filter wrapper */
let mut bandlim = BlepAmpFilter::new(SD::from_sample(ampl_level), bandlim);
/* ensure BLEP has enough space to fit a single audio frame
(there is no margin - our frames will have constant size). */
bandlim.ensure_frame_time(audio.sample_rate, host_frequency, host_frame_cycles, 0);
/* number of audio output channels */
let channels = audio.channels as usize;
/* create an emulator instance */
let mut ay = Ay3_891xAudio::default();
/* buffered frame changes to AY-3-891x registers */
let mut changes = Vec::new();
/* play counter */
let mut counter = repeat;
/* total seconds */
let total_secs = ym_file.frames.len() as f32 / ym_file.frame_frequency as f32;
let mut last_secs: u32 = u32::MAX;
loop {
if track {
let cur_secs = ym_file.cursor() as f32 / ym_file.frame_frequency as f32;
print_current(&mut last_secs, cur_secs, total_secs);
}
/* produce YM chipset changes */
let finished = ym_file.produce_next_ay_frame(|ts, reg, val| {
changes.push(
AyRegChange::new(
(ts * HOST_CLOCK_RATIO as f32).trunc() as i32,
AyRegister::from(reg),
val))
});
/* render audio into BLEP */
ay.render_audio::<A,_,_>(changes.drain(..),
&mut bandlim,
host_frame_cycles,
host_frame_cycles,
channel_map.0);
/* close frame */
let frame_sample_count = bandlim.end_frame(host_frame_cycles);
/* render BLEP frame into the sample buffer */
audio.producer.render_frame(|ref mut buf| {
/* ensure the BLEP frame fits into the sample buffer */
buf.resize(frame_sample_count * channels, S::silence());
render_audio(&mut bandlim, buf);
});
/* send a rendered sample buffer to the consumer */
audio.producer.send_frame().unwrap();
if finished {
log::info!("Finished.");
if repeat!= 0 {
counter -= 1;
if counter == 0 {
break;
}
}
}
}
/* let the audio thread finish playing */
for _ in 0..50 {
audio.producer.render_frame(|ref mut buf| {
buf.fill(S::silence());
});
audio.producer.send_frame().unwrap();
}
audio.close();
}
fn play_with_amps<A, SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where A: AmpLevels<SD>,
SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm +'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample
{
let Args { volume, repeat, channels: channel_map, mode, track, hpass, lpass,.. } = args;
log::debug!("Repeat: {repeat}, volume: {volume}%");
let ampl_level = amplitude_level(args.volume);
log::trace!("Amplitude filter: {ampl_level}");
let mut env = PlayEnv { ym_file, ampl_level, repeat, channel_map, track };
let channels = audio.channels as usize;
match mode {
ChannelMode::MixedStereo(mono_filter) if channels >= 2 => {
/* a multi-channel to stereo mixer */
let mut blep = BlepStereo::new(mono_filter.into_sample(),
/* a stereo band-limited pulse buffer */
BandLimitedAny::new(2, lpass, hpass));
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
ChannelMode::Channel(channel) if channels >= channel as usize => {
/* a multi-channel band-limited pulse buffer */
let third_chan = (channel - 1) as usize;
let mut blep = BandLimitedAny::new(3, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1, third_chan]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
_ => {
/* a monophonic band-limited pulse buffer */
let mut blep = BandLimitedAny::new(1, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
env.channel_map = MONO_CHANNEL_MAP;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_fill_interleaved(buf, channels, 0);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
}
}
fn play<SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm +'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample,
AyFuseAmps<SD>: AmpLevels<SD>,
AyAmps<SD>: AmpLevels<SD>
{
if args.fuse {
log::debug!("YM amplitide levels: fuse (measured)");
play_with_amps::<AyFuseAmps<_>, _, _>(audio, ym_file, args)
}
else {
log::debug!("YM amplitide levels: default (specs)");
play_with_amps::<AyAmps<_>, _, _>(audio, ym_file, args)
}
}
/****************************************************************************/
/* MAIN */
/****************************************************************************/
#[derive(Default, Debug, Clone, Copy, PartialEq)]
struct StreamConfigHint {
channels: Option<cpal::ChannelCount>,
sample_rate: Option<cpal::SampleRate>,
sample_format: Option<cpal::SampleFormat>
}
impl fmt::Display for StreamConfigHint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self == &StreamConfigHint::default() {
return f.write_str("*");
}
if let Some(format) = self.sample_format {
write!(f, "{:?}", format)?;
}
if self.channels.is_some() && self.sample_rate.is_some() {
f.write_str(",")?;
}
if let Some(channels) = self.channels {
write!(f, "{}", channels)?;
}
if let Some(rate) = self.sample_rate {
write!(f, "@{}", rate.0)?;
}
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// A file path to an YM song.
ym_file: Option<String>,
/// Audio mixer volume: 0 - 100.
#[arg(short, long, default_value_t = 50, value_parser = volume_in_range)]
volume: u8,
/// Play counter, 0 to play forever.
#[arg(short, long, default_value_t = 0)]
repeat: u32,
/// YM channels map: Left Center Right.
#[arg(short, long, default_value_t = ChannelMap::default(), value_parser = parse_channels)]
channels: ChannelMap,
/// Channel mode: s|m|0.s|N.
///
/// "s" - stereo mode with a center channel mixed with an amplitude of 0.8
///
/// "m" - monophonic mode, played on all audio channels
///
/// "0.s" - stereo mode, center channel amplitude: 0.s
///
/// "N" - multi-channel mode, redirect center channel to Nth (3+) audio channel
#[arg(short, long, default_value_t = ChannelMode::default(), value_parser = parse_channel_mode)]
mode: ChannelMode,
/// Switch to alternative YM amplitude levels (measured vs specs).
#[arg(short, long, default_value_t = false)]
fuse: bool,
/// Enable low-pass audio band filter.
#[arg(long, default_value_t = false)]
lpass: bool,
/// Enable high-pass audio band filter.
#[arg(long, default_value_t = false)]
hpass: bool,
/// Desired audio output parameters: ST,CHANS@RATE.
///
/// ST is a sample type, e.g.: U8, I16, U32, F32.
///
/// CHANS is the number of channels and RATE is the sample rate.
#[arg(short, long, default_value_t = StreamConfigHint::default(), value_parser = parse_stream_config)]
audio: StreamConfigHint,
/// Track the current song time.
#[arg(short, long, default_value_t = false)]
track: bool,
/// Log verbosity level.
///
/// -d for INFO, -dd for DEBUG, -ddd for TRACE
#[arg(short, long, action = clap::ArgAction::Count)]
debug: u8
}
fn volume_in_range(s: &str) -> Result<u8, String> {
let volume: usize = s
.parse()
.map_err(|_| format!("`{s}` isn't a volume"))?;
if (0..=NORMAL_AMPLITUDE as usize).contains(&volume) {
Ok(volume as u8)
} else {
Err(format!("volume not in range 0 - {NORMAL_AMPLITUDE}"))
}
}
fn parse_channel_mode(s: &str) -> Result<ChannelMode, String> {
Ok(match s {
"s"|"S" => ChannelMode::MixedStereo(0.8),
"m"|"M" => ChannelMode::Mono,
s if s.starts_with("0.") => {
let amp: f32 = s.parse().map_err(|_| format!("`{s}` isn't a stereo mixer amplitude"))?;
ChannelMode::MixedStereo(amp)
}
s => {
let channel: u32 = s.parse().map_err(|_| format!("`{s}` isn't a mixer mode channel"))?;
if channel < 3 {
return Err("mixer mode channel must be >= 3".into());
} | })
}
fn parse_channels(s: &str) -> Result<ChannelMap, String> {
const ERROR_MSG: &str = "channel mapping should be a permutation of ABC characters";
if s.len()!= 3 {
return Err(ERROR_MSG.into());
}
let mut channels = [usize::MAX; 3];
// [A, B, C], where N -> 0: left, 1: right, 2: center
for (ch, chan) in s.chars().zip([0, 2, 1].into_iter()) {
let pos = match ch.to_ascii_uppercase() {
'A' => 0,
'B' => 1,
'C' => 2,
_ => return Err(ERROR_MSG.into())
};
if channels[pos]!= usize::MAX {
return Err(ERROR_MSG.into());
}
channels[pos] = chan;
}
Ok(ChannelMap(channels))
}
fn parse_stream_config(mut s: &str) -> Result<StreamConfigHint, String> {
let mut config = StreamConfigHint::default();
if s == "*" {
return Ok(config);
}
const FORMATS: &[([&str;2], cpal::SampleFormat)] = &[
(["i8", "I8"], cpal::SampleFormat::I8),
(["u8", "U8"], cpal::SampleFormat::U8),
(["i16", "I16"], cpal::SampleFormat::I16),
(["u16", "U16"], cpal::SampleFormat::U16),
(["i32", "I32"], cpal::SampleFormat::I32),
(["u32", "U32"], cpal::SampleFormat::U32),
(["f32", "F32"], cpal::SampleFormat::F32),
(["i64", "I64"], cpal::SampleFormat::I64),
(["u64", "U64"], cpal::SampleFormat::U64),
(["f64", "F64"], cpal::SampleFormat::F64)];
for ([lc, uc], format) in FORMATS.into_iter() {
if s.starts_with(lc) || s.starts_with(uc) {
config.sample_format = Some(*format);
(_, s) = s.split_at(lc.len());
break;
}
}
if s.starts_with(",") {
(_, s) = s.split_at(1);
}
let chan = match s.split_once("@") {
Some((chan, rate)) => {
if!rate.is_empty() {
config.sample_rate = Some(cpal::SampleRate(u32::from_str_radix(rate, 10)
.map_err(|_| "expected sample rate")?));
}
chan
},
None => s
};
if!chan.is_empty() {
config.channels = Some(u16::from_str_radix(chan, 10)
.map_err(|_| "expected number of channels")?);
}
Ok(config)
}
fn find_best_audio_config(device: &cpal::Device, request: StreamConfigHint) -> Result<cpal::SupportedStreamConfig, Box<dyn std::error::Error>>
{
log::trace!("Audio device: {}", device.name().unwrap_or_else(|e| e.to_string()));
let default_config = device.default_output_config()?;
if request == StreamConfigHint::default() {
return Ok(default_config);
}
let channels = request.channels.unwrap_or(default_config.channels());
for config in device.supported_output_configs()? {
if config.channels()!= channels {
continue;
}
if let Some(sample_format) = request.sample_format {
if config.sample_format()!= sample_format {
continue;
}
}
else if config.sample_format()!= default_config.sample_format() {
continue;
}
let sample_rate = match request.sample_rate {
Some(sample_rate) => if!(config.min_sample_rate()..=config.max_sample_rate()).contains(&sample_rate) {
continue;
}
else {
sample_rate
}
None => default_config.sample_rate()
};
return Ok(config.with_sample_rate(sample_rate));
}
Err("Could not find the audio configuration matching given parameters")?
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
simple_logger::init_with_level(match args.debug {
0 => log::Level::Warn,
1 => log::Level::Info,
2 => log::Level::Debug,
_ => log::Level::Trace
})?;
let ym_file = match args.ym_file {
Some(ref ym_path) => {
log::info!("Loading YM file: {}", ym_path);
ym_file_parser::parse_file(ym_path)?
}
None => YmSong::parse(BUZZ_YM)?
};
log::info!(r#"{} "{}" by {}"#,
ym_file.version,
ym_file.title.trim(),
ym_file.author.trim());
log::info!(r#"Duration: {:?} {}"#,
ym_file.song_duration(),
ym_file.comments.trim());
log::debug!("Chip: {} Hz, frame: {} Hz, {} cycles each",
ym_file.clock_frequency(),
ym_file.frame_frequency,
ym_file.frame_cycles());
log::debug!("Frames total: {}, loop to: {}, {:?}",
ym_file.frames.len(),
ym_file.loop_frame,
ym_file.song_attrs);
if log::log_enabled!(log::Level::Debug) &&!ym_file.dd_samples.is_empty() {
let mut sample_lens = Vec::with_capacity(ym_file.dd_samples_ends.len());
ym_file.dd_samples_ends.iter().try_fold(0,
|prev | ChannelMode::Channel(channel)
} | random_line_split |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) |
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode {
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self {
Self {
params: SystemState::new(world),
}
}
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
}
| {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
} | conditional_block |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
}
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode {
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self |
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
}
| {
Self {
params: SystemState::new(world),
}
} | identifier_body |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
}
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode { | _graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self {
Self {
params: SystemState::new(world),
}
}
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
} | fn run(
&self, | random_line_split |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison,.. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
}
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode {
fn | (
&self,
_graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self {
Self {
params: SystemState::new(world),
}
}
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
}
| run | identifier_name |
mod.rs | //! Operating System backed readiness event queue.
//!
//! [`OsQueue`] provides an abstraction over platform specific Operating System
//! backed readiness event queues, such as kqueue or epoll.
//!
//! [`OsQueue`]: crate::os::OsQueue
//!
//! # Portability
//!
//! Using [`OsQueue`] provides a portable interface across supported platforms
//! as long as the caller takes the following into consideration:
//!
//! ### Draining readiness
//!
//! When using [edge-triggered] mode, once a readiness event is received, the
//! corresponding operation must be performed repeatedly until it returns
//! [`WouldBlock`]. Unless this is done, there is no guarantee that another
//! readiness event will be delivered, even if further data is received for the
//! [`Evented`] handle. See [`RegisterOption`] for more.
//!
//! [`WouldBlock`]: std::io::ErrorKind::WouldBlock
//! [edge-triggered]: crate::os::RegisterOption::EDGE
//! [`Evented`]: crate::os::Evented
//! [`RegisterOption`]: crate::os::RegisterOption
//!
//! ### Spurious events
//!
//! The [`Source::poll`] implementation may return readiness events even if the
//! associated [`Evented`] handle is not actually ready. Given the same code,
//! this may happen more on some platforms than others. It is important to never
//! assume that, just because a readiness notification was received, that the
//! associated operation will as well.
//!
//! If operation fails with a [`WouldBlock`] error, then the caller should not
//! treat this as an error and wait until another readiness event is received.
//!
//! Furthermore a single call to poll may result in multiple readiness events
//! being returned for a single `Evented` handle. For example, if a TCP socket
//! becomes both readable and writable, it may be possible for a single
//! readiness event to be returned with both [readable] and [writable] readiness
//! **OR** two separate events may be returned, one with readable set and one
//! with writable set.
//!
//! [`Source::poll`]: crate::event::Source::poll
//! [readable]: crate::os::Interests::READABLE
//! [writable]: crate::os::Interests::WRITABLE
//!
//! ### Registering handles
//!
//! Unless otherwise noted, it should be assumed that types implementing
//! [`Evented`] will never become ready unless they are registered with
//! `OsQueue`.
//!
//! For example:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use std::thread;
//! use std::time::Duration;
//!
//! use gaea::event;
//! use gaea::net::TcpStream;
//! use gaea::os::{OsQueue, RegisterOption};
//!
//! let address = "216.58.193.100:80".parse()?;
//! let mut stream = TcpStream::connect(address)?;
//!
//! // This actually does nothing towards connecting the TCP stream.
//! thread::sleep(Duration::from_secs(1));
//!
//! let mut os_queue = OsQueue::new()?;
//!
//! // The connect is not guaranteed to have started until it is registered at
//! // this point.
//! os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Timeout granularity
//!
//! The timeout provided to [`event::Source::blocking_poll`] will be rounded
//! up to the system clock granularity (usually 1ms), and kernel scheduling
//! delays mean that the blocking interval may be overrun by a small amount.
//!
//! ### Interrupts while polling
//!
//! Interrupts (`EINTR` in C and `io::ErrorKind::Interrupted` in Rust) are
//! **not** handled, they are returned as errors. In most cases however these
//! can simply be ignored, but it's up to the user how to deal with the "error".
//!
//! # Implementation notes
//!
//! `OsQueue` is backed by a readiness event queue provided by the operating
//! system. On all platforms a call to [`Source::poll`] is mostly just a direct
//! system call. The following system implementations back `OsQueue`:
//!
//! | OS | Selector |
//! |---------|----------|
//! | FreeBSD | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) |
//! | Linux | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) |
//! | macOS | [kqueue](https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man2/kqueue.2.html) |
//! | NetBSD | [kqueue](http://netbsd.gw.com/cgi-bin/man-cgi?kqueue) |
//! | OpenBSD | [kqueue](https://man.openbsd.org/kqueue) |
//!
//! On all supported platforms socket operations are handled by using the system
//! queue. Platform specific extensions (e.g. [`EventedFd`]) allow accessing
//! other features provided by individual system selectors.
//!
//! [`Eventedfd`]: crate::sys::unix::EventedFd
//! [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
use std::io;
use std::time::Duration;
use log::trace;
use crate::{event, sys};
mod awakener;
mod evented;
mod interests;
mod option;
pub mod signals;
pub use self::awakener::Awakener;
pub use self::evented::Evented;
pub use self::interests::Interests;
pub use self::option::RegisterOption;
pub use self::signals::{Signal, SignalSet, Signals};
/// Readiness event queue backed by the OS. | /// e.g. read or write.
///
/// To use this queue an [`Evented`] handle must first be registered using the
/// [`register`] method, supplying an associated id, readiness interests and
/// polling option. The [associated id] is used to associate a readiness event
/// with an `Evented` handle. The readiness [interests] defines which specific
/// operations on the handle to monitor for readiness. And the final argument,
/// [`RegisterOption`], defines how to deliver the readiness events, see
/// [`RegisterOption`] for more information.
///
/// See to [module documentation] for information.
///
/// [reading]: crate::event::Ready::READABLE
/// [writing]: crate::event::Ready::WRITABLE
/// [`register`]: OsQueue::register
/// [associated id]: event::Id
/// [interests]: Interests
/// [module documentation]: crate::os
#[derive(Debug)]
pub struct OsQueue {
selector: sys::Selector,
}
impl OsQueue {
/// Create a new OS backed readiness event queue.
///
/// This function will make a syscall to the operating system to create the
/// system selector. If this syscall fails it will return the error.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::os::OsQueue;
/// use gaea::poll;
///
/// // Create a new OS backed readiness event queue.
/// let mut os_queue = OsQueue::new()?;
///
/// // Create an event sink.
/// let mut events = Vec::new();
///
/// // Poll the queue for new readiness events.
/// // But since no `Evented` handles have been registered we'll receive no
/// // events.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(500)))?;
/// # Ok(())
/// # }
/// ```
pub fn new() -> io::Result<OsQueue> {
sys::Selector::new().map(|selector| OsQueue { selector })
}
/// Register an [`Evented`] handle with the `OsQueue`.
///
/// Once registered, the [`Evented`] handle will be monitored for readiness
/// state changes. When it notices a state change, it will return a
/// readiness event for the handle the next time the queue is [`polled`].
///
/// [`polled`]: crate::poll
///
/// # Arguments
///
/// `handle`: This is the handle that the `OsQueue` should monitor for
/// readiness state changes.
///
/// `id`: The caller picks a id to associate with the handle. When [`poll`]
/// returns an [event] for the handle, this id is [included]. This allows
/// the caller to map the event to its handle. The id associated with the
/// `Evented` handle can be changed at any time by calling [`reregister`].
///
/// `interests`: Specifies which operations `OsQueue` should monitor for
/// readiness. `OsQueue` will only return readiness events for operations
/// specified by this argument. If a socket is registered with [readable]
/// interests and the socket becomes writable, no event will be returned
/// from [`poll`]. The readiness interests for an `Evented` handle can be
/// changed at any time by calling [`reregister`]. Most types that
/// implemented [`Evented`] have a associated constant named `INTERESTS`
/// which provide a sane interest for that type, e.g. [`TcpStream`
/// interests] are readable and writable.
///
/// `opt`: Specifies the registration option. Just like the interests and
/// id, the option can be changed for an `Evented` handle at any time by
/// calling [`reregister`].
///
/// [`poll`]: crate::poll
/// [event]: crate::event::Event
/// [included]: crate::event::Event::id
/// [`reregister`]: OsQueue::reregister
/// [readable]: Interests::READABLE
/// [`TcpStream` interests]: crate::net::TcpStream::INTERESTS
///
/// # Notes
///
/// Unless otherwise specified, the caller should assume that once an
/// `Evented` handle is registered with a `OsQueue` instance, it is bound to
/// that `OsQueue` for the lifetime of the `Evented` handle. This remains
/// true even if the `Evented` handle is [deregistered].
///
/// [deregistered]: OsQueue::deregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
/// use gaea::{event, poll};
///
/// // Create a new `OsQueue` as well a containers for the events.
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with queue.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(0) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// }
/// }
/// }
/// # }
/// ```
pub fn register<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("registering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.register(self, id, interests, opt)
}
/// Re-register an `Evented` handle with `OsQueue`.
///
/// Re-registering an `Evented` handle allows changing the details of the
/// registration. Specifically, it allows updating the associated `id`,
/// `interests`, and `opt` specified in previous `register` and `reregister`
/// calls.
///
/// The `reregister` arguments **fully override** the previous values. In
/// other words, if a socket is registered with [readable] interest and the
/// call to `reregister` specifies only [writable], then read interest is no
/// longer monitored for the handle.
///
/// The `Evented` handle must have previously been registered with this
/// `OsQueue` otherwise the call to `reregister` may return an error.
///
/// See the [`register`] documentation for details about the function
/// arguments.
///
/// [readable]: Interests::READABLE
/// [writable]: Interests::WRITABLE
/// [`register`]: OsQueue::register
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{Interests, RegisterOption, OsQueue};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`, only with readable interest.
/// os_queue.register(&mut stream, event::Id(0), Interests::READABLE, RegisterOption::EDGE)?;
///
/// // Reregister the connection specifying a different id and write interest
/// // instead. `RegisterOption::EDGE` must be specified even though that value
/// // is not being changed.
/// os_queue.reregister(&mut stream, event::Id(2), Interests::WRITABLE, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(2) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// } else if event.id() == event::Id(0) {
/// // We won't receive events with the old id anymore.
/// unreachable!();
/// }
/// }
/// }
/// # }
/// ```
pub fn reregister<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("reregistering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.reregister(self, id, interests, opt)
}
/// Deregister an `Evented` handle from `OsQueue`.
///
/// When an `Evented` handle is deregistered, the handle will no longer be
/// monitored for readiness state changes. Unlike disabling handles with
/// [`oneshot`], deregistering clears up any internal resources needed to
/// track the handle.
///
/// A handle can be registered again using [`register`] after it has been
/// deregistered; however, it must be passed back to the **same** `OsQueue`.
///
/// # Notes
///
/// Calling [`reregister`] after `deregister` may be work on some platforms
/// but not all. To properly re-register a handle after deregistering use
/// `register`, this works on all platforms.
///
/// [`oneshot`]: RegisterOption::ONESHOT
/// [`register`]: OsQueue::register
/// [`reregister`]: OsQueue::reregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Do stuff with the connection etc.
///
/// // Deregister it so the resources can be cleaned up.
/// os_queue.deregister(&mut stream)?;
///
/// // Set a timeout because we shouldn't receive any events anymore.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(100)))?;
/// assert!(events.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn deregister<E>(&mut self, handle: &mut E) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("deregistering handle");
handle.deregister(self)
}
/// Get access to the system selector. Used by platform specific code, e.g.
/// `EventedFd`.
pub(crate) fn selector(&self) -> &sys::Selector {
&self.selector
}
}
impl<ES, E> event::Source<ES, E> for OsQueue
where ES: event::Sink,
E: From<io::Error>,
{
fn max_timeout(&self) -> Option<Duration> {
// Can't tell if an event is available.
None
}
fn poll(&mut self, event_sink: &mut ES) -> Result<(), E> {
self.blocking_poll(event_sink, Some(Duration::from_millis(0)))
}
fn blocking_poll(&mut self, event_sink: &mut ES, timeout: Option<Duration>) -> Result<(), E> {
trace!("polling OS queue: timeout={:?}", timeout);
self.selector.select(event_sink, timeout)
.map_err(Into::into)
}
} | ///
/// This queue allows a program to monitor a large number of [`Evented`]
/// handles, waiting until one or more become "ready" for some class of
/// operations; e.g. [reading] or [writing]. An [`Evented`] type is considered
/// ready if it is possible to immediately perform a corresponding operation; | random_line_split |
mod.rs | //! Operating System backed readiness event queue.
//!
//! [`OsQueue`] provides an abstraction over platform specific Operating System
//! backed readiness event queues, such as kqueue or epoll.
//!
//! [`OsQueue`]: crate::os::OsQueue
//!
//! # Portability
//!
//! Using [`OsQueue`] provides a portable interface across supported platforms
//! as long as the caller takes the following into consideration:
//!
//! ### Draining readiness
//!
//! When using [edge-triggered] mode, once a readiness event is received, the
//! corresponding operation must be performed repeatedly until it returns
//! [`WouldBlock`]. Unless this is done, there is no guarantee that another
//! readiness event will be delivered, even if further data is received for the
//! [`Evented`] handle. See [`RegisterOption`] for more.
//!
//! [`WouldBlock`]: std::io::ErrorKind::WouldBlock
//! [edge-triggered]: crate::os::RegisterOption::EDGE
//! [`Evented`]: crate::os::Evented
//! [`RegisterOption`]: crate::os::RegisterOption
//!
//! ### Spurious events
//!
//! The [`Source::poll`] implementation may return readiness events even if the
//! associated [`Evented`] handle is not actually ready. Given the same code,
//! this may happen more on some platforms than others. It is important to never
//! assume that, just because a readiness notification was received, that the
//! associated operation will as well.
//!
//! If operation fails with a [`WouldBlock`] error, then the caller should not
//! treat this as an error and wait until another readiness event is received.
//!
//! Furthermore a single call to poll may result in multiple readiness events
//! being returned for a single `Evented` handle. For example, if a TCP socket
//! becomes both readable and writable, it may be possible for a single
//! readiness event to be returned with both [readable] and [writable] readiness
//! **OR** two separate events may be returned, one with readable set and one
//! with writable set.
//!
//! [`Source::poll`]: crate::event::Source::poll
//! [readable]: crate::os::Interests::READABLE
//! [writable]: crate::os::Interests::WRITABLE
//!
//! ### Registering handles
//!
//! Unless otherwise noted, it should be assumed that types implementing
//! [`Evented`] will never become ready unless they are registered with
//! `OsQueue`.
//!
//! For example:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use std::thread;
//! use std::time::Duration;
//!
//! use gaea::event;
//! use gaea::net::TcpStream;
//! use gaea::os::{OsQueue, RegisterOption};
//!
//! let address = "216.58.193.100:80".parse()?;
//! let mut stream = TcpStream::connect(address)?;
//!
//! // This actually does nothing towards connecting the TCP stream.
//! thread::sleep(Duration::from_secs(1));
//!
//! let mut os_queue = OsQueue::new()?;
//!
//! // The connect is not guaranteed to have started until it is registered at
//! // this point.
//! os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Timeout granularity
//!
//! The timeout provided to [`event::Source::blocking_poll`] will be rounded
//! up to the system clock granularity (usually 1ms), and kernel scheduling
//! delays mean that the blocking interval may be overrun by a small amount.
//!
//! ### Interrupts while polling
//!
//! Interrupts (`EINTR` in C and `io::ErrorKind::Interrupted` in Rust) are
//! **not** handled, they are returned as errors. In most cases however these
//! can simply be ignored, but it's up to the user how to deal with the "error".
//!
//! # Implementation notes
//!
//! `OsQueue` is backed by a readiness event queue provided by the operating
//! system. On all platforms a call to [`Source::poll`] is mostly just a direct
//! system call. The following system implementations back `OsQueue`:
//!
//! | OS | Selector |
//! |---------|----------|
//! | FreeBSD | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) |
//! | Linux | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) |
//! | macOS | [kqueue](https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man2/kqueue.2.html) |
//! | NetBSD | [kqueue](http://netbsd.gw.com/cgi-bin/man-cgi?kqueue) |
//! | OpenBSD | [kqueue](https://man.openbsd.org/kqueue) |
//!
//! On all supported platforms socket operations are handled by using the system
//! queue. Platform specific extensions (e.g. [`EventedFd`]) allow accessing
//! other features provided by individual system selectors.
//!
//! [`Eventedfd`]: crate::sys::unix::EventedFd
//! [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
use std::io;
use std::time::Duration;
use log::trace;
use crate::{event, sys};
mod awakener;
mod evented;
mod interests;
mod option;
pub mod signals;
pub use self::awakener::Awakener;
pub use self::evented::Evented;
pub use self::interests::Interests;
pub use self::option::RegisterOption;
pub use self::signals::{Signal, SignalSet, Signals};
/// Readiness event queue backed by the OS.
///
/// This queue allows a program to monitor a large number of [`Evented`]
/// handles, waiting until one or more become "ready" for some class of
/// operations; e.g. [reading] or [writing]. An [`Evented`] type is considered
/// ready if it is possible to immediately perform a corresponding operation;
/// e.g. read or write.
///
/// To use this queue an [`Evented`] handle must first be registered using the
/// [`register`] method, supplying an associated id, readiness interests and
/// polling option. The [associated id] is used to associate a readiness event
/// with an `Evented` handle. The readiness [interests] defines which specific
/// operations on the handle to monitor for readiness. And the final argument,
/// [`RegisterOption`], defines how to deliver the readiness events, see
/// [`RegisterOption`] for more information.
///
/// See to [module documentation] for information.
///
/// [reading]: crate::event::Ready::READABLE
/// [writing]: crate::event::Ready::WRITABLE
/// [`register`]: OsQueue::register
/// [associated id]: event::Id
/// [interests]: Interests
/// [module documentation]: crate::os
#[derive(Debug)]
pub struct OsQueue {
selector: sys::Selector,
}
impl OsQueue {
/// Create a new OS backed readiness event queue.
///
/// This function will make a syscall to the operating system to create the
/// system selector. If this syscall fails it will return the error.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::os::OsQueue;
/// use gaea::poll;
///
/// // Create a new OS backed readiness event queue.
/// let mut os_queue = OsQueue::new()?;
///
/// // Create an event sink.
/// let mut events = Vec::new();
///
/// // Poll the queue for new readiness events.
/// // But since no `Evented` handles have been registered we'll receive no
/// // events.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(500)))?;
/// # Ok(())
/// # }
/// ```
pub fn new() -> io::Result<OsQueue> {
sys::Selector::new().map(|selector| OsQueue { selector })
}
/// Register an [`Evented`] handle with the `OsQueue`.
///
/// Once registered, the [`Evented`] handle will be monitored for readiness
/// state changes. When it notices a state change, it will return a
/// readiness event for the handle the next time the queue is [`polled`].
///
/// [`polled`]: crate::poll
///
/// # Arguments
///
/// `handle`: This is the handle that the `OsQueue` should monitor for
/// readiness state changes.
///
/// `id`: The caller picks a id to associate with the handle. When [`poll`]
/// returns an [event] for the handle, this id is [included]. This allows
/// the caller to map the event to its handle. The id associated with the
/// `Evented` handle can be changed at any time by calling [`reregister`].
///
/// `interests`: Specifies which operations `OsQueue` should monitor for
/// readiness. `OsQueue` will only return readiness events for operations
/// specified by this argument. If a socket is registered with [readable]
/// interests and the socket becomes writable, no event will be returned
/// from [`poll`]. The readiness interests for an `Evented` handle can be
/// changed at any time by calling [`reregister`]. Most types that
/// implemented [`Evented`] have a associated constant named `INTERESTS`
/// which provide a sane interest for that type, e.g. [`TcpStream`
/// interests] are readable and writable.
///
/// `opt`: Specifies the registration option. Just like the interests and
/// id, the option can be changed for an `Evented` handle at any time by
/// calling [`reregister`].
///
/// [`poll`]: crate::poll
/// [event]: crate::event::Event
/// [included]: crate::event::Event::id
/// [`reregister`]: OsQueue::reregister
/// [readable]: Interests::READABLE
/// [`TcpStream` interests]: crate::net::TcpStream::INTERESTS
///
/// # Notes
///
/// Unless otherwise specified, the caller should assume that once an
/// `Evented` handle is registered with a `OsQueue` instance, it is bound to
/// that `OsQueue` for the lifetime of the `Evented` handle. This remains
/// true even if the `Evented` handle is [deregistered].
///
/// [deregistered]: OsQueue::deregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
/// use gaea::{event, poll};
///
/// // Create a new `OsQueue` as well a containers for the events.
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with queue.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(0) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// }
/// }
/// }
/// # }
/// ```
pub fn register<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("registering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.register(self, id, interests, opt)
}
/// Re-register an `Evented` handle with `OsQueue`.
///
/// Re-registering an `Evented` handle allows changing the details of the
/// registration. Specifically, it allows updating the associated `id`,
/// `interests`, and `opt` specified in previous `register` and `reregister`
/// calls.
///
/// The `reregister` arguments **fully override** the previous values. In
/// other words, if a socket is registered with [readable] interest and the
/// call to `reregister` specifies only [writable], then read interest is no
/// longer monitored for the handle.
///
/// The `Evented` handle must have previously been registered with this
/// `OsQueue` otherwise the call to `reregister` may return an error.
///
/// See the [`register`] documentation for details about the function
/// arguments.
///
/// [readable]: Interests::READABLE
/// [writable]: Interests::WRITABLE
/// [`register`]: OsQueue::register
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{Interests, RegisterOption, OsQueue};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`, only with readable interest.
/// os_queue.register(&mut stream, event::Id(0), Interests::READABLE, RegisterOption::EDGE)?;
///
/// // Reregister the connection specifying a different id and write interest
/// // instead. `RegisterOption::EDGE` must be specified even though that value
/// // is not being changed.
/// os_queue.reregister(&mut stream, event::Id(2), Interests::WRITABLE, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(2) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// } else if event.id() == event::Id(0) {
/// // We won't receive events with the old id anymore.
/// unreachable!();
/// }
/// }
/// }
/// # }
/// ```
pub fn reregister<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("reregistering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.reregister(self, id, interests, opt)
}
/// Deregister an `Evented` handle from `OsQueue`.
///
/// When an `Evented` handle is deregistered, the handle will no longer be
/// monitored for readiness state changes. Unlike disabling handles with
/// [`oneshot`], deregistering clears up any internal resources needed to
/// track the handle.
///
/// A handle can be registered again using [`register`] after it has been
/// deregistered; however, it must be passed back to the **same** `OsQueue`.
///
/// # Notes
///
/// Calling [`reregister`] after `deregister` may be work on some platforms
/// but not all. To properly re-register a handle after deregistering use
/// `register`, this works on all platforms.
///
/// [`oneshot`]: RegisterOption::ONESHOT
/// [`register`]: OsQueue::register
/// [`reregister`]: OsQueue::reregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Do stuff with the connection etc.
///
/// // Deregister it so the resources can be cleaned up.
/// os_queue.deregister(&mut stream)?;
///
/// // Set a timeout because we shouldn't receive any events anymore.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(100)))?;
/// assert!(events.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn deregister<E>(&mut self, handle: &mut E) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("deregistering handle");
handle.deregister(self)
}
/// Get access to the system selector. Used by platform specific code, e.g.
/// `EventedFd`.
pub(crate) fn selector(&self) -> &sys::Selector |
}
impl<ES, E> event::Source<ES, E> for OsQueue
where ES: event::Sink,
E: From<io::Error>,
{
fn max_timeout(&self) -> Option<Duration> {
// Can't tell if an event is available.
None
}
fn poll(&mut self, event_sink: &mut ES) -> Result<(), E> {
self.blocking_poll(event_sink, Some(Duration::from_millis(0)))
}
fn blocking_poll(&mut self, event_sink: &mut ES, timeout: Option<Duration>) -> Result<(), E> {
trace!("polling OS queue: timeout={:?}", timeout);
self.selector.select(event_sink, timeout)
.map_err(Into::into)
}
}
| {
&self.selector
} | identifier_body |
mod.rs | //! Operating System backed readiness event queue.
//!
//! [`OsQueue`] provides an abstraction over platform specific Operating System
//! backed readiness event queues, such as kqueue or epoll.
//!
//! [`OsQueue`]: crate::os::OsQueue
//!
//! # Portability
//!
//! Using [`OsQueue`] provides a portable interface across supported platforms
//! as long as the caller takes the following into consideration:
//!
//! ### Draining readiness
//!
//! When using [edge-triggered] mode, once a readiness event is received, the
//! corresponding operation must be performed repeatedly until it returns
//! [`WouldBlock`]. Unless this is done, there is no guarantee that another
//! readiness event will be delivered, even if further data is received for the
//! [`Evented`] handle. See [`RegisterOption`] for more.
//!
//! [`WouldBlock`]: std::io::ErrorKind::WouldBlock
//! [edge-triggered]: crate::os::RegisterOption::EDGE
//! [`Evented`]: crate::os::Evented
//! [`RegisterOption`]: crate::os::RegisterOption
//!
//! ### Spurious events
//!
//! The [`Source::poll`] implementation may return readiness events even if the
//! associated [`Evented`] handle is not actually ready. Given the same code,
//! this may happen more on some platforms than others. It is important to never
//! assume that, just because a readiness notification was received, that the
//! associated operation will as well.
//!
//! If operation fails with a [`WouldBlock`] error, then the caller should not
//! treat this as an error and wait until another readiness event is received.
//!
//! Furthermore a single call to poll may result in multiple readiness events
//! being returned for a single `Evented` handle. For example, if a TCP socket
//! becomes both readable and writable, it may be possible for a single
//! readiness event to be returned with both [readable] and [writable] readiness
//! **OR** two separate events may be returned, one with readable set and one
//! with writable set.
//!
//! [`Source::poll`]: crate::event::Source::poll
//! [readable]: crate::os::Interests::READABLE
//! [writable]: crate::os::Interests::WRITABLE
//!
//! ### Registering handles
//!
//! Unless otherwise noted, it should be assumed that types implementing
//! [`Evented`] will never become ready unless they are registered with
//! `OsQueue`.
//!
//! For example:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use std::thread;
//! use std::time::Duration;
//!
//! use gaea::event;
//! use gaea::net::TcpStream;
//! use gaea::os::{OsQueue, RegisterOption};
//!
//! let address = "216.58.193.100:80".parse()?;
//! let mut stream = TcpStream::connect(address)?;
//!
//! // This actually does nothing towards connecting the TCP stream.
//! thread::sleep(Duration::from_secs(1));
//!
//! let mut os_queue = OsQueue::new()?;
//!
//! // The connect is not guaranteed to have started until it is registered at
//! // this point.
//! os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Timeout granularity
//!
//! The timeout provided to [`event::Source::blocking_poll`] will be rounded
//! up to the system clock granularity (usually 1ms), and kernel scheduling
//! delays mean that the blocking interval may be overrun by a small amount.
//!
//! ### Interrupts while polling
//!
//! Interrupts (`EINTR` in C and `io::ErrorKind::Interrupted` in Rust) are
//! **not** handled, they are returned as errors. In most cases however these
//! can simply be ignored, but it's up to the user how to deal with the "error".
//!
//! # Implementation notes
//!
//! `OsQueue` is backed by a readiness event queue provided by the operating
//! system. On all platforms a call to [`Source::poll`] is mostly just a direct
//! system call. The following system implementations back `OsQueue`:
//!
//! | OS | Selector |
//! |---------|----------|
//! | FreeBSD | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) |
//! | Linux | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) |
//! | macOS | [kqueue](https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man2/kqueue.2.html) |
//! | NetBSD | [kqueue](http://netbsd.gw.com/cgi-bin/man-cgi?kqueue) |
//! | OpenBSD | [kqueue](https://man.openbsd.org/kqueue) |
//!
//! On all supported platforms socket operations are handled by using the system
//! queue. Platform specific extensions (e.g. [`EventedFd`]) allow accessing
//! other features provided by individual system selectors.
//!
//! [`Eventedfd`]: crate::sys::unix::EventedFd
//! [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
use std::io;
use std::time::Duration;
use log::trace;
use crate::{event, sys};
mod awakener;
mod evented;
mod interests;
mod option;
pub mod signals;
pub use self::awakener::Awakener;
pub use self::evented::Evented;
pub use self::interests::Interests;
pub use self::option::RegisterOption;
pub use self::signals::{Signal, SignalSet, Signals};
/// Readiness event queue backed by the OS.
///
/// This queue allows a program to monitor a large number of [`Evented`]
/// handles, waiting until one or more become "ready" for some class of
/// operations; e.g. [reading] or [writing]. An [`Evented`] type is considered
/// ready if it is possible to immediately perform a corresponding operation;
/// e.g. read or write.
///
/// To use this queue an [`Evented`] handle must first be registered using the
/// [`register`] method, supplying an associated id, readiness interests and
/// polling option. The [associated id] is used to associate a readiness event
/// with an `Evented` handle. The readiness [interests] defines which specific
/// operations on the handle to monitor for readiness. And the final argument,
/// [`RegisterOption`], defines how to deliver the readiness events, see
/// [`RegisterOption`] for more information.
///
/// See to [module documentation] for information.
///
/// [reading]: crate::event::Ready::READABLE
/// [writing]: crate::event::Ready::WRITABLE
/// [`register`]: OsQueue::register
/// [associated id]: event::Id
/// [interests]: Interests
/// [module documentation]: crate::os
#[derive(Debug)]
pub struct | {
selector: sys::Selector,
}
impl OsQueue {
/// Create a new OS backed readiness event queue.
///
/// This function will make a syscall to the operating system to create the
/// system selector. If this syscall fails it will return the error.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::os::OsQueue;
/// use gaea::poll;
///
/// // Create a new OS backed readiness event queue.
/// let mut os_queue = OsQueue::new()?;
///
/// // Create an event sink.
/// let mut events = Vec::new();
///
/// // Poll the queue for new readiness events.
/// // But since no `Evented` handles have been registered we'll receive no
/// // events.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(500)))?;
/// # Ok(())
/// # }
/// ```
pub fn new() -> io::Result<OsQueue> {
sys::Selector::new().map(|selector| OsQueue { selector })
}
/// Register an [`Evented`] handle with the `OsQueue`.
///
/// Once registered, the [`Evented`] handle will be monitored for readiness
/// state changes. When it notices a state change, it will return a
/// readiness event for the handle the next time the queue is [`polled`].
///
/// [`polled`]: crate::poll
///
/// # Arguments
///
/// `handle`: This is the handle that the `OsQueue` should monitor for
/// readiness state changes.
///
/// `id`: The caller picks a id to associate with the handle. When [`poll`]
/// returns an [event] for the handle, this id is [included]. This allows
/// the caller to map the event to its handle. The id associated with the
/// `Evented` handle can be changed at any time by calling [`reregister`].
///
/// `interests`: Specifies which operations `OsQueue` should monitor for
/// readiness. `OsQueue` will only return readiness events for operations
/// specified by this argument. If a socket is registered with [readable]
/// interests and the socket becomes writable, no event will be returned
/// from [`poll`]. The readiness interests for an `Evented` handle can be
/// changed at any time by calling [`reregister`]. Most types that
/// implemented [`Evented`] have a associated constant named `INTERESTS`
/// which provide a sane interest for that type, e.g. [`TcpStream`
/// interests] are readable and writable.
///
/// `opt`: Specifies the registration option. Just like the interests and
/// id, the option can be changed for an `Evented` handle at any time by
/// calling [`reregister`].
///
/// [`poll`]: crate::poll
/// [event]: crate::event::Event
/// [included]: crate::event::Event::id
/// [`reregister`]: OsQueue::reregister
/// [readable]: Interests::READABLE
/// [`TcpStream` interests]: crate::net::TcpStream::INTERESTS
///
/// # Notes
///
/// Unless otherwise specified, the caller should assume that once an
/// `Evented` handle is registered with a `OsQueue` instance, it is bound to
/// that `OsQueue` for the lifetime of the `Evented` handle. This remains
/// true even if the `Evented` handle is [deregistered].
///
/// [deregistered]: OsQueue::deregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
/// use gaea::{event, poll};
///
/// // Create a new `OsQueue` as well a containers for the events.
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with queue.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(0) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// }
/// }
/// }
/// # }
/// ```
pub fn register<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("registering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.register(self, id, interests, opt)
}
/// Re-register an `Evented` handle with `OsQueue`.
///
/// Re-registering an `Evented` handle allows changing the details of the
/// registration. Specifically, it allows updating the associated `id`,
/// `interests`, and `opt` specified in previous `register` and `reregister`
/// calls.
///
/// The `reregister` arguments **fully override** the previous values. In
/// other words, if a socket is registered with [readable] interest and the
/// call to `reregister` specifies only [writable], then read interest is no
/// longer monitored for the handle.
///
/// The `Evented` handle must have previously been registered with this
/// `OsQueue` otherwise the call to `reregister` may return an error.
///
/// See the [`register`] documentation for details about the function
/// arguments.
///
/// [readable]: Interests::READABLE
/// [writable]: Interests::WRITABLE
/// [`register`]: OsQueue::register
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{Interests, RegisterOption, OsQueue};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`, only with readable interest.
/// os_queue.register(&mut stream, event::Id(0), Interests::READABLE, RegisterOption::EDGE)?;
///
/// // Reregister the connection specifying a different id and write interest
/// // instead. `RegisterOption::EDGE` must be specified even though that value
/// // is not being changed.
/// os_queue.reregister(&mut stream, event::Id(2), Interests::WRITABLE, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(2) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// } else if event.id() == event::Id(0) {
/// // We won't receive events with the old id anymore.
/// unreachable!();
/// }
/// }
/// }
/// # }
/// ```
pub fn reregister<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("reregistering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.reregister(self, id, interests, opt)
}
/// Deregister an `Evented` handle from `OsQueue`.
///
/// When an `Evented` handle is deregistered, the handle will no longer be
/// monitored for readiness state changes. Unlike disabling handles with
/// [`oneshot`], deregistering clears up any internal resources needed to
/// track the handle.
///
/// A handle can be registered again using [`register`] after it has been
/// deregistered; however, it must be passed back to the **same** `OsQueue`.
///
/// # Notes
///
/// Calling [`reregister`] after `deregister` may be work on some platforms
/// but not all. To properly re-register a handle after deregistering use
/// `register`, this works on all platforms.
///
/// [`oneshot`]: RegisterOption::ONESHOT
/// [`register`]: OsQueue::register
/// [`reregister`]: OsQueue::reregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Do stuff with the connection etc.
///
/// // Deregister it so the resources can be cleaned up.
/// os_queue.deregister(&mut stream)?;
///
/// // Set a timeout because we shouldn't receive any events anymore.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(100)))?;
/// assert!(events.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn deregister<E>(&mut self, handle: &mut E) -> io::Result<()>
where E: Evented +?Sized,
{
trace!("deregistering handle");
handle.deregister(self)
}
/// Get access to the system selector. Used by platform specific code, e.g.
/// `EventedFd`.
pub(crate) fn selector(&self) -> &sys::Selector {
&self.selector
}
}
impl<ES, E> event::Source<ES, E> for OsQueue
where ES: event::Sink,
E: From<io::Error>,
{
fn max_timeout(&self) -> Option<Duration> {
// Can't tell if an event is available.
None
}
fn poll(&mut self, event_sink: &mut ES) -> Result<(), E> {
self.blocking_poll(event_sink, Some(Duration::from_millis(0)))
}
fn blocking_poll(&mut self, event_sink: &mut ES, timeout: Option<Duration>) -> Result<(), E> {
trace!("polling OS queue: timeout={:?}", timeout);
self.selector.select(event_sink, timeout)
.map_err(Into::into)
}
}
| OsQueue | identifier_name |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///离开房间,离线也好,主动离开也好
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id!= *room_id {
continue;
}
break;
}
if ind | self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let room_id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
}
| ex < 0 {
return;
}
| identifier_body |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///离开房间,离线也好,主动离开也好
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id!= *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> { | _id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
}
|
let room | identifier_name |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///离开房间,离线也好,主动离开也好
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id); |
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id!= *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let room_id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
} | } | random_line_split |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room | leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id!= *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let room_id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
}
| _id);
}
}
///离开房间,离线也好,主动离开也好
pub fn | conditional_block |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
let test = blocksss[x].test_block();
let prev = blocksss[x - 1].previus_hash.clone();
if test!= prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// funcion for mining pending transactions
fn mine(&mut self, proof: &str) {
let mut data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key |
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
Some(calc.1),
);
blockchin.add_block_thirst(start);
// end of starrt code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
// panic!("oh no");
// }
// use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
// let mut rng = rand::rngs::OsRng;
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let privet_key =
// rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
// let public = rsa::RsaPublicKey::from(&privet_key);
// let enc_data = public
// .encrypt(&mut rng, padding, b"s")
// .expect("can't encrypt data");
// println!("{:?}", enc_data);
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let decode = privet_key.decrypt(padding, &enc_data).unwrap();
// println!("{}", String::from_utf8_lossy(&decode));
}
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
let mut tran: Vec<Transaction> = Vec::new();
for x in blockchin.clone().get_pendding_transactions() {
tran.push(x.clone());
if tran.len() == 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
} else if blockchin.clone().get_pendding_transactions().len() < 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
}
}
blockchin.clear_pendding_transactions();
}
fn generate_wallet() -> Wallet {
let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
let priv_key = key.private_key_to_pem().unwrap();
let pub_key = key.public_key_to_pem().unwrap();
let priv_key = hex::encode(priv_key);
let pub_key = hex::encode(pub_key);
Wallet { pub_key, priv_key }
}
| {
let amount = y.amount as u128;
balance += amount;
} | conditional_block |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct | {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
let test = blocksss[x].test_block();
let prev = blocksss[x - 1].previus_hash.clone();
if test!= prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// funcion for mining pending transactions
fn mine(&mut self, proof: &str) {
let mut data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
Some(calc.1),
);
blockchin.add_block_thirst(start);
// end of starrt code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
// panic!("oh no");
// }
// use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
// let mut rng = rand::rngs::OsRng;
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let privet_key =
// rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
// let public = rsa::RsaPublicKey::from(&privet_key);
// let enc_data = public
// .encrypt(&mut rng, padding, b"s")
// .expect("can't encrypt data");
// println!("{:?}", enc_data);
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let decode = privet_key.decrypt(padding, &enc_data).unwrap();
// println!("{}", String::from_utf8_lossy(&decode));
}
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
let mut tran: Vec<Transaction> = Vec::new();
for x in blockchin.clone().get_pendding_transactions() {
tran.push(x.clone());
if tran.len() == 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
} else if blockchin.clone().get_pendding_transactions().len() < 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
}
}
blockchin.clear_pendding_transactions();
}
fn generate_wallet() -> Wallet {
let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
let priv_key = key.private_key_to_pem().unwrap();
let pub_key = key.public_key_to_pem().unwrap();
let priv_key = hex::encode(priv_key);
let pub_key = hex::encode(pub_key);
Wallet { pub_key, priv_key }
}
| Transaction | identifier_name |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
let test = blocksss[x].test_block();
let prev = blocksss[x - 1].previus_hash.clone();
if test!= prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// funcion for mining pending transactions
fn mine(&mut self, proof: &str) {
let mut data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block { | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
Some(calc.1),
);
blockchin.add_block_thirst(start);
// end of starrt code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
// panic!("oh no");
// }
// use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
// let mut rng = rand::rngs::OsRng;
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let privet_key =
// rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
// let public = rsa::RsaPublicKey::from(&privet_key);
// let enc_data = public
// .encrypt(&mut rng, padding, b"s")
// .expect("can't encrypt data");
// println!("{:?}", enc_data);
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let decode = privet_key.decrypt(padding, &enc_data).unwrap();
// println!("{}", String::from_utf8_lossy(&decode));
}
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
let mut tran: Vec<Transaction> = Vec::new();
for x in blockchin.clone().get_pendding_transactions() {
tran.push(x.clone());
if tran.len() == 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
} else if blockchin.clone().get_pendding_transactions().len() < 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
}
}
blockchin.clear_pendding_transactions();
}
fn generate_wallet() -> Wallet {
let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
let priv_key = key.private_key_to_pem().unwrap();
let pub_key = key.public_key_to_pem().unwrap();
let priv_key = hex::encode(priv_key);
let pub_key = hex::encode(pub_key);
Wallet { pub_key, priv_key }
} | random_line_split |
|
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
    /// Recomputes this block's hash from its stored fields and returns it.
    ///
    /// `calculate_hash_proof` is deterministic for a given input, so the
    /// result equals the stored `hash` as long as the block has not been
    /// tampered with — chain validation (`BlockChain::is_good`) relies
    /// on exactly this property.
    fn test_block(&self) -> String {
        calculate_hash_proof(
            self.index, // u64 is Copy; the original `.clone()` was a no-op
            self.previus_hash.clone(),
            self.timestamp.clone(),
            self.data.clone(),
            PROOF, // &'static str; cloning a shared reference was a no-op
        )
        .0
    }
}
/// A transfer of `amount` coins from `sender` to `reciver` (sic), both
/// identified by their hex-encoded public keys.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
    sender: String,       // sender's public key (wallet address)
    reciver: String,      // receiver's public key (typo kept: part of the serialized format)
    amount: u64,          // number of coins transferred
    hash: Option<String>, // SHA3-512 digest of the fields above; None until computed
}
impl Transaction {
    /// Builds a transaction between two wallets, then fills in its hash
    /// via `calculate_hash_transaction`.
    ///
    /// The wallets are taken by value, so the `.clone()` calls the
    /// original code made on their `pub_key` fields were redundant —
    /// the owned strings can simply be moved out.
    fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
        let tx = Transaction {
            sender: sender.pub_key,
            reciver: reciver.pub_key,
            amount,
            hash: None,
        };
        calculate_hash_transaction(tx)
    }
}
/// Computes the SHA3-512 digest of a transaction's fields and returns
/// the same transaction with `hash` filled in.
///
/// The digest covers sender, receiver and the decimal rendering of the
/// amount. The `{:20x}` width specifier is deliberately kept as-is:
/// changing it would change every stored transaction hash.
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
    use sha3::{Digest, Sha3_512};
    let mut hasher = Sha3_512::new();
    // `update` only needs a byte view, so borrowing avoids the String
    // clones the original code made for each field.
    hasher.update(transaction.sender.as_bytes());
    hasher.update(transaction.reciver.as_bytes());
    hasher.update(transaction.amount.to_string().as_bytes());
    let hash = format!("{:20x}", hasher.finalize());
    // Struct-update syntax moves the remaining fields instead of
    // rebuilding the value field by field.
    Transaction {
        hash: Some(hash),
        ..transaction
    }
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
/// Constructor trait for [`Block`] taking a concrete (non-optional) proof.
///
/// NOTE(review): this trait method is shadowed by the inherent
/// `Block::new` generated by `derive(new)` (which `main` calls with
/// `Some(proof)`), so this trait appears to be dead code — confirm
/// before removing.
trait Createblock {
    fn new(
        index: u64,
        previus_hash: String,
        timestamp: String,
        data: Vec<Transaction>,
        hash: String,
        proof: u128,
    ) -> Self;
}
/// Trait-based constructor: wraps the raw proof in `Some` and builds
/// the block verbatim from the given fields. No hashing or validation
/// happens here.
impl Createblock for Block {
    fn new(
        index: u64,
        previus_hash: String,
        timestamp: String,
        data: Vec<Transaction>,
        hash: String,
        proof: u128,
    ) -> Block {
        Block {
            index,
            previus_hash,
            timestamp,
            data,
            hash,
            proof: Some(proof),
        }
    }
}
/// Constructor trait for [`BlockChain`].
trait Blockchain {
    fn new() -> Self;
}

/// Creates an empty chain: no blocks and no pending transactions.
/// The genesis block must be added separately (see `main`), because
/// `add_block` panics on an empty chain.
impl Blockchain for BlockChain {
    fn new() -> BlockChain {
        BlockChain {
            blocks: Vec::new(),
            pending_transactions: Vec::new(),
        }
    }
}
impl BlockChain {
    /// Appends a pre-built block without mining or validation.
    /// Used for the genesis block.
    fn add_block_thirst(&mut self, block: Block) {
        self.blocks.push(block);
    }

    /// Queues a transaction to be included in the next mined block.
    fn add_transaction(&mut self, transaction: Transaction) {
        self.pending_transactions.push(transaction);
    }

    /// Mines and appends a new block containing `data`.
    ///
    /// The new block's index and previous-hash come from the current
    /// last block, so the chain must already contain at least the
    /// genesis block (panics otherwise, as in the original code).
    ///
    /// Bug fix: the original read the clock twice — once for the hash
    /// pre-image and once for the stored timestamp — so the stored hash
    /// could disagree with a later recomputation from stored fields.
    /// One timestamp is now used for both.
    fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
        // Look the predecessor up once instead of four times.
        let last = self.blocks.last().expect("Can't get previous block");
        let index = last.index + 1;
        let previus_hash = last.hash.clone();
        let timestamp = chrono::offset::Utc::now().timestamp_millis().to_string();
        let (hash, proof) = calculate_hash_proof(
            index,
            previus_hash.clone(),
            timestamp.clone(),
            data.clone(),
            proof,
        );
        self.add_block_thirst(Block {
            index,
            previus_hash,
            timestamp,
            data,
            hash,
            proof: Some(proof),
        })
    }

    /// Returns a copy of the not-yet-mined transactions.
    fn get_pendding_transactions(&self) -> Vec<Transaction> {
        self.pending_transactions.clone()
    }

    /// Drops all queued transactions.
    fn clear_pendding_transactions(&mut self) {
        self.pending_transactions.clear();
    }

    /// Validates the chain: every block's `previus_hash` must equal the
    /// recomputed hash of the block immediately before it.
    ///
    /// Bug fix: the original compared block `x`'s *recomputed* hash
    /// with block `x - 1`'s `previus_hash` — i.e. the hash of block
    /// `x - 2` — so it never actually checked the link between adjacent
    /// blocks. It also cloned the entire block vector for no reason.
    fn is_good(&self) -> bool {
        for x in 1..self.blocks.len() {
            if self.blocks[x].previus_hash != self.blocks[x - 1].test_block() {
                return false;
            }
        }
        true
    }

    /// Mines all pending transactions into a single new block, then
    /// clears the queue. Does nothing when the queue is empty.
    fn mine(&mut self, proof: &str) {
        let data = self.get_pendding_transactions(); // was needlessly `mut`
        if !data.is_empty() {
            self.add_block(data, proof);
            self.clear_pendding_transactions();
        }
    }

    /// Computes the wallet's balance by replaying every transaction in
    /// every block. Also prints the balance (side effect kept from the
    /// original).
    ///
    /// Bug fix: the original *added* the amount for the sender as well,
    /// so sending coins increased the sender's balance. Outgoing
    /// amounts are now subtracted, saturating at zero because the
    /// return type is unsigned.
    fn get_balance(&self, wallet: Wallet) -> u128 {
        let mut balance: u128 = 0;
        for block in &self.blocks {
            for tx in &block.data {
                let amount = u128::from(tx.amount);
                if tx.sender == wallet.pub_key {
                    balance = balance.saturating_sub(amount);
                }
                if tx.reciver == wallet.pub_key {
                    balance += amount;
                }
            }
        }
        println!("{}", balance);
        balance
    }
}
impl Display for Block {
    /// Renders the block for human-readable output: index, previous
    /// hash, timestamp, transaction list and own hash (`proof` is
    /// intentionally omitted).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{} {:?} {:?} {:?} {:?}",
            self.index, self.previus_hash, self.timestamp, self.data, self.hash
        )
    }
}
/// Mines a hash for the given block contents.
///
/// The pre-image is `index` + `previus_hash` + the `{:?}` rendering of
/// the timestamp + the `{:?}` rendering of the transactions; the
/// iteration counter is then fed into the hasher until the hex digest
/// starts with the difficulty prefix `proof` (e.g. `"0"`). Returns
/// `(hex_digest, iterations_needed)`.
///
/// The exact pre-image format — including the `{:?}` quoting of the
/// timestamp — is deliberately unchanged: altering it would invalidate
/// every previously stored hash.
fn calculate_hash_proof(
    index: u64,
    previus_hash: String,
    timestamp: String,
    data: Vec<Transaction>,
    proof: &str,
) -> (String, u128) {
    use sha3::{Digest, Sha3_512};
    let mut hasher = Sha3_512::new();
    // The original ran `index.to_string().parse::<String>().unwrap()`,
    // a round-trip that can only reproduce the same string.
    let before = index.to_string()
        + &previus_hash
        + &format!("{:?}", timestamp)
        + &format!("{:?}", data);
    hasher.update(before.as_bytes());
    let mut iterations: u128 = 0;
    // Effectively unbounded search; u128::MAX replaces the deprecated
    // std::u128::MAX path.
    for x in 0..u128::MAX {
        // Format the candidate digest once per iteration (the original
        // re-cloned and re-formatted it in the success branch).
        let digest = format!("{:02x}", hasher.clone().finalize());
        // `starts_with` replaces the original's `[..proof.len()]`
        // slice, which would panic for a prefix longer than the digest.
        if digest.starts_with(proof) {
            println!("Mined! : {} difficulty: {}", digest, x);
            iterations = x;
            break;
        }
        hasher.update(x.to_string().as_bytes());
    }
    (format!("{:02x}", hasher.finalize()), iterations)
}
/// Demo driver: builds two wallets, mines a genesis block holding one
/// transaction, mines the pending queue, serializes the chain to
/// `json.json`, validates it and prints a balance.
fn main() {
    // Two demo wallets; `olek` is reused below for the balance check.
    let olek = generate_wallet();
    let anna = generate_wallet();
    let mut blockchin: BlockChain = Blockchain::new();
    let s: Transaction = Transaction::new(olek.clone(), anna, 22);
    // NOTE(review): the second `.to_string()` is a no-op round-trip.
    let time = chrono::offset::Utc::now()
        .timestamp_millis()
        .to_string()
        .to_string();
    // Mine the genesis block by hand: index 0, empty previous hash.
    let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
    // This resolves to the inherent `new` generated by `derive(new)`
    // (`proof: Option<u128>`), not the `Createblock` trait method.
    let start: Block = Block::new(
        0,
        "".to_string(),
        time,
        vec![s.clone()],
        calc.0,
        Some(calc.1),
    );
    blockchin.add_block_thirst(start);
    // end of start code
    // let mut transactions = vec![];
    // for x in 0..=33 {
    //     let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
    //     transactions.push(a);
    // }
    // for x in transactions {
    //     blockchin.add_transaction(x);
    // }
    // let mut transaction = vec![];
    // for _ in 0..10 {
    //     std::thread::sleep(std::time::Duration::from_millis(1000));
    //     let one = generate_wallet();
    //     // sleep for 1 second
    //     std::thread::sleep(std::time::Duration::from_millis(1000));
    //     let two = generate_wallet();
    //     let s: Transaction = Transaction::new(one, two, 100);
    //     blockchin.add_transaction(s.clone());
    //     transaction.push(s);
    // }
    // Mine whatever is pending (the queue is empty at this point, so
    // this is a no-op unless the experiments above are re-enabled).
    blockchin.mine(PROOF);
    // create_pending(&mut blockchin, PROOF);
    // Persist the chain as pretty-printed JSON for inspection.
    let json = serde_json::to_string_pretty(&blockchin).unwrap();
    println!("{}", json);
    std::fs::write("json.json", json).expect("Unable to write file");
    if blockchin.is_good() {
        println!("XD")
    }
    // NOTE(review): `nic` is never used — presumably left over from an
    // experiment.
    let nic = generate_wallet();
    // check user balance
    println!(" is {}", blockchin.get_balance(olek));
    // blockchin
    // let contents =
    //     std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
    // let bc: BlockChain = serde_json::from_str(&contents).unwrap();
    // if bc.is_good() {
    //     panic!("oh no");
    // }
    // use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
    // let mut rng = rand::rngs::OsRng;
    // let padding = PaddingScheme::new_pkcs1v15_encrypt();
    // let privet_key =
    //     rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
    // let public = rsa::RsaPublicKey::from(&privet_key);
    // let enc_data = public
    //     .encrypt(&mut rng, padding, b"s")
    //     .expect("can't encrypt data");
    // println!("{:?}", enc_data);
    // let padding = PaddingScheme::new_pkcs1v15_encrypt();
    // let decode = privet_key.decrypt(padding, &enc_data).unwrap();
    // println!("{}", String::from_utf8_lossy(&decode));
}
/// Packs all pending transactions into blocks of at most five
/// transactions each, then clears the pending queue.
///
/// Bug fix: the original's `else if` branch fired on *every* loop
/// iteration whenever fewer than five transactions were pending in
/// total, mining one block per transaction and duplicating earlier
/// ones in later blocks. Now a block is mined only when a batch of
/// five is full, plus one final block for any remainder.
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
    let mut batch: Vec<Transaction> = Vec::new();
    // `get_pendding_transactions` already returns an owned clone, so
    // the original's `blockchin.clone()` of the whole chain was wasted.
    for tx in blockchin.get_pendding_transactions() {
        batch.push(tx);
        if batch.len() == 5 {
            blockchin.add_block(batch.clone(), proof);
            batch.clear();
        }
    }
    // Mine whatever is left over as a final, smaller block.
    if !batch.is_empty() {
        blockchin.add_block(batch, proof);
    }
    blockchin.clear_pendding_transactions();
}
/// Creates a fresh wallet: a 1024-bit RSA key pair, PEM-encoded and
/// then hex-encoded into printable strings.
fn generate_wallet() -> Wallet {
    // 1024 bits keeps the demo fast; the original noted 2048 as the
    // more realistic size.
    let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key");
    let priv_pem = key.private_key_to_pem().unwrap();
    let pub_pem = key.public_key_to_pem().unwrap();
    Wallet {
        pub_key: hex::encode(pub_pem),
        priv_key: hex::encode(priv_pem),
    }
}
| {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
} | identifier_body |
lib.rs | //! A fast, extensible, command-line arguments parser.
//!
//! This library is very new, so expect regular breaking changes. If you find a
//! bug or lacking documentation, don't hesitate to open an
//! [issue](https://github.com/Aloso/parkour/issues) or a pull request.
//!
//! This crate started as an experiment, so I'm not sure yet if I want to
//! maintain it long-term. See [here](https://github.com/Aloso/parkour/issues/1)
//! for more.
//!
//! ## Getting started
//!
//! Parkour requires const generics. The first rust version that supports them
//! is Rust 1.51 (`rustc 1.51.0-beta.2`). You can install it with `rustup
//! default beta`.
//!
//! It's recommended to import the [prelude](./prelude/index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This
//! easiest by using the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while!input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input, until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String, | //! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while!input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this, I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while!input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while!input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn parser() -> ArgsInput {
    // Thin wrapper over palex's `ArgsInput::from_args`, which —
    // judging by the name — builds the input from the process
    // arguments; confirm against the palex docs.
    ArgsInput::from_args()
}
/// A prelude to make it easier to import all the needed types and traits. Use
/// it like this:
///
/// ```
/// use parkour::prelude::*;
/// ```
pub mod prelude {
    // Actions that drive parsing into target variables (set, append,
    // increment, ...).
    pub use crate::actions::{
        Action, Append, Dec, Inc, Reset, Set, SetOnce, SetPositional, SetSubcommand,
        Unset,
    };
    // Parsing contexts for the built-in value implementations.
    pub use crate::impls::{ListCtx, NumberCtx, StringCtx};
    // Flag / positional-argument context helpers.
    pub use crate::util::{ArgCtx, Flag, PosCtx};
    // Core traits and the argument-input type.
    pub use crate::{ArgsInput, FromInput, FromInputValue, Parse};
}
//! # size: u8,
//! # } | random_line_split |
lib.rs | //! A fast, extensible, command-line arguments parser.
//!
//! This library is very new, so expect regular breaking changes. If you find a
//! bug or lacking documentation, don't hesitate to open an
//! [issue](https://github.com/Aloso/parkour/issues) or a pull request.
//!
//! This crate started as an experiment, so I'm not sure yet if I want to
//! maintain it long-term. See [here](https://github.com/Aloso/parkour/issues/1)
//! for more.
//!
//! ## Getting started
//!
//! Parkour requires const generics. The first rust version that supports them
//! is Rust 1.51 (`rustc 1.51.0-beta.2`). You can install it with `rustup
//! default beta`.
//!
//! It's recommended to import the [prelude](./prelude/index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This
//! easiest by using the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while!input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input, until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String,
//! # color_space: ColorSpace,
//! # size: u8,
//! # }
//! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while!input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this, I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while!input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while!input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn | () -> ArgsInput {
ArgsInput::from_args()
}
/// A prelude to make it easier to import all the needed types and traits. Use
/// it like this:
///
/// ```
/// use parkour::prelude::*;
/// ```
pub mod prelude {
pub use crate::actions::{
Action, Append, Dec, Inc, Reset, Set, SetOnce, SetPositional, SetSubcommand,
Unset,
};
pub use crate::impls::{ListCtx, NumberCtx, StringCtx};
pub use crate::util::{ArgCtx, Flag, PosCtx};
pub use crate::{ArgsInput, FromInput, FromInputValue, Parse};
}
| parser | identifier_name |
lib.rs | //! A fast, extensible, command-line arguments parser.
//!
//! This library is very new, so expect regular breaking changes. If you find a
//! bug or lacking documentation, don't hesitate to open an
//! [issue](https://github.com/Aloso/parkour/issues) or a pull request.
//!
//! This crate started as an experiment, so I'm not sure yet if I want to
//! maintain it long-term. See [here](https://github.com/Aloso/parkour/issues/1)
//! for more.
//!
//! ## Getting started
//!
//! Parkour requires const generics. The first rust version that supports them
//! is Rust 1.51 (`rustc 1.51.0-beta.2`). You can install it with `rustup
//! default beta`.
//!
//! It's recommended to import the [prelude](./prelude/index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This
//! easiest by using the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while!input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input, until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String,
//! # color_space: ColorSpace,
//! # size: u8,
//! # }
//! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while!input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this, I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while!input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while!input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn parser() -> ArgsInput |
/// A prelude to make it easier to import all the needed types and traits. Use
/// it like this:
///
/// ```
/// use parkour::prelude::*;
/// ```
pub mod prelude {
pub use crate::actions::{
Action, Append, Dec, Inc, Reset, Set, SetOnce, SetPositional, SetSubcommand,
Unset,
};
pub use crate::impls::{ListCtx, NumberCtx, StringCtx};
pub use crate::util::{ArgCtx, Flag, PosCtx};
pub use crate::{ArgsInput, FromInput, FromInputValue, Parse};
}
| {
ArgsInput::from_args()
} | identifier_body |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)|!self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn | (&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id!= 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id!= 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item!= ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id()!= current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
}
| publish_peer_discovered | identifier_name |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self |
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)|!self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id!= 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id!= 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item!= ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id()!= current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
}
| {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
} | identifier_body |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() |
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)|!self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id!= 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id!= 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item!= ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id()!= current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
}
| {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
} | conditional_block |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
    /// Creates the server, spawns the gossip event-handler loop, and (via
    /// `GossipState::new`) announces our own address plus every bootstrap
    /// address as discovered peers.
    pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
        let (sender, receiver) = mpsc::channel(32);
        let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
        run_gossip_event_handler(receiver, state.new_ref(), node_id);
        GossipServer { state, sender }
    }
    /// Wraps this server in a grpcio `Service` for registration.
    pub fn build_service(&self) -> Service {
        create_gossip(self.clone())
    }
    /// Returns a clone of the shared gossip state handle.
    pub fn state(&self) -> GossipState {
        self.state.clone()
    }
    /// Records a new meta-leader id (applied asynchronously by the event loop).
    pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
        self.event(GossipEvent::MetaLeaderChanged(id))
    }
    /// Records updated liveness info for a peer (applied asynchronously).
    pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
        self.event(GossipEvent::PeerUpdate(peer))
    }
    // Enqueues an event for the handler loop; send errors (receiver gone)
    // are silently dropped.
    fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
        self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
    }
}
/// Spawns the single consumer of `GossipEvent`s: each event is applied to the
/// (weakly held) shared state; `NewPeerDiscovered` additionally opens a
/// client connection to the peer.
fn run_gossip_event_handler(
    receiver: mpsc::Receiver<GossipEvent>,
    state: GossipStateRef,
    self_id: u64,
) {
    let f = receiver.for_each(move |event| {
        // `upgrade()` panics if the GossipState was dropped; this loop is
        // expected to end before that happens.
        match event {
            GossipEvent::NewPeerDiscovered(address) => {
                connect_to_client(state.upgrade(), self_id, &address);
            }
            GossipEvent::GossipReceived(data) => {
                state.upgrade().merge_gossip(data);
            }
            GossipEvent::MetaLeaderChanged(id) => {
                state.upgrade().update_meta_leader(id);
            }
            GossipEvent::PeerUpdate(peer) => {
                state.upgrade().update_node_liveness(&peer);
            }
        };
        Ok(())
    });
    tokio::spawn(f);
}
/// Strong, shareable handle to this node's gossip state.
#[derive(Clone)]
pub struct GossipState {
    inner: Arc<RwLock<InnerGossipState>>,
}
/// Weak handle used by background tasks so they do not keep the state alive.
#[derive(Clone)]
pub struct GossipStateRef {
    inner: Weak<RwLock<InnerGossipState>>,
}
/// All gossip bookkeeping, guarded by the `RwLock` inside `GossipState`.
struct InnerGossipState {
    clock: Clock,
    // Our own gossip payload; re-stamped with the clock on every get_current().
    current: GossipData,
    // Per-address shutdown handles for running per-peer client loops.
    connections: HashMap<String, oneshot::Sender<()>>,
    // Per-address RPC clients, keyed by the peer's gossiped address.
    clients: HashMap<String, RpcClient>,
    // Latest gossip payload received from each peer, keyed by node id.
    peers: HashMap<u64, GossipData>,
    event_publisher: mpsc::Sender<GossipEvent>,
    event_emitter: EventEmitter<PeerStateEvent>,
}
/// Internal events consumed by `run_gossip_event_handler`.
enum GossipEvent {
    // A peer's gossip payload arrived (via RPC or a client exchange).
    GossipReceived(GossipData),
    // A previously unknown peer address was learned.
    NewPeerDiscovered(String),
    MetaLeaderChanged(u64),
    PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher, | peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
    /// Upgrades to a strong handle.
    ///
    /// Panics if the owning `GossipState` has already been dropped;
    /// background loops are expected to stop before that happens.
    fn upgrade(&self) -> GossipState {
        GossipState {
            inner: self.inner.upgrade().unwrap(),
        }
    }
}
impl InnerGossipState {
    /// Looks up the RPC client for a node id via its last-gossiped address.
    fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
        self.peers
            .get(&node_id)
            .and_then(|gossip| self.clients.get(gossip.get_address()))
            .cloned()
            .ok_or_else(|| format_err!("Not connected to '{}'", node_id))
    }
    // Registers the shutdown handle and client for a newly opened connection.
    fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
        self.connections.insert(addr.to_string(), sender);
        self.clients.insert(addr.to_string(), client);
    }
    /// Folds a peer's gossip payload into our state: records its address the
    /// first time we see it, absorbs its liveness table, and announces any
    /// addresses it knows that we have not connected to yet.
    fn merge_gossip(&mut self, gossip: GossipData) {
        let peer_id = gossip.get_node_id();
        let current_addrs = self.current.mut_peer_addresses();
        if current_addrs.get(&peer_id).is_none() {
            let address = gossip.get_address();
            current_addrs.insert(peer_id, address.to_string());
            self.publish_peer_discovered(address);
        }
        gossip
            .get_node_liveness()
            .values()
            .for_each(|peer| self.update_node_liveness(peer));
        gossip
            .get_peer_addresses()
            .iter()
            .filter(|(id, _)|!self.peers.contains_key(id))
            .for_each(|(_, address)| self.publish_peer_discovered(address));
        self.peers.insert(peer_id, gossip);
    }
    // Fire-and-forget publish onto the event-handler channel.
    fn publish_event(&self, event: GossipEvent) {
        let f = self.event_publisher.clone().send(event);
        tokio::spawn(f.map(|_| ()).map_err(|_| ()));
    }
    fn publish_peer_discovered(&self, address: &str) {
        self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
    }
    fn update_meta_leader(&mut self, node_id: u64) {
        self.current.set_meta_leader_id(node_id);
    }
    /// Our own known leader id, or failing that the most recently updated
    /// leader id any peer has gossiped. A value of 0 means "unset".
    fn meta_leader_id(&self) -> Option<u64> {
        if self.current.meta_leader_id!= 0 {
            return Some(self.current.meta_leader_id);
        }
        self.peers
            .values()
            .filter(|peer| peer.meta_leader_id!= 0)
            .max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
            .map(|peer| peer.meta_leader_id)
    }
    /// Stores liveness info, emitting `PeerJoined` the first time an id is seen.
    fn update_node_liveness(&mut self, peer: &PeerState) {
        let peer_id = peer.get_peer().id;
        if self.current.get_node_liveness().get(&peer_id).is_none() {
            self.emit_new_live_node(peer_id)
        }
        self.current
            .mut_node_liveness()
            .insert(peer_id, peer.clone());
    }
    fn emit_new_live_node(&self, peer_id: u64) {
        self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
    }
}
/// Per-peer client loop context: a weak state handle plus the RPC client
/// used to exchange gossip with that peer.
struct ClientContext {
    state: GossipStateRef,
    client: RpcClient,
}
/// Opens a client connection to `address` (no-op if already connected) and
/// spawns its gossip loop: a 5-second tick stream multiplexed with a oneshot
/// close signal, feeding a `ClientContext` consumer.
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
    let mut locked_state = state.inner.write().unwrap();
    if locked_state.connections.contains_key(address) {
        return;
    }
    info!("Discovered: {}", address);
    let client = RpcClient::new(self_id, address);
    let (sender, receiver) = oneshot::channel();
    locked_state.add_connection(address, sender, client.clone());
    // Release the write lock before wiring up the async streams.
    drop(locked_state);
    let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
        .map(|_| ClientEvent::GossipTick)
        .map_err(|err| error!("Error in gossip tick: {:?}", err));
    // Firing the oneshot (or dropping it) yields Done, which stops the loop.
    let close_stream = receiver
        .into_stream()
        .map(|_: ()| ClientEvent::Done)
        .map_err(|_| ());
    let (sender, receiver) = mpsc::channel(64);
    let producer = gossip_stream
        .select(close_stream)
        .take_while(|item| Ok(*item!= ClientEvent::Done))
        .for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
    let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
    tokio::spawn(consumer);
    tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id()!= current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
} | event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(), | random_line_split |
mod.rs | //! # Queueing Honey Badger
//!
//! This works exactly like Dynamic Honey Badger, but it has a transaction queue built in. Whenever
//! an epoch is output, it will automatically select a list of pending transactions and propose it
//! for the next one. The user can continuously add more pending transactions to the queue.
//!
//! If there are no pending transactions, no validators in the process of being added or
//! removed and not enough other nodes have proposed yet, no automatic proposal will be made: The
//! network then waits until at least _f + 1_ have any content for the next epoch.
//!
//! ## How it works
//!
//! Queueing Honey Badger runs a Dynamic Honey Badger internally, and automatically inputs a list
//! of pending transactions as its contribution at the beginning of each epoch. These are selected
//! by making a random choice of _B / N_ out of the first _B_ entries in the queue, where _B_ is the
//! configurable `batch_size` parameter, and _N_ is the current number of validators.
//!
//! After each output, the transactions that made it into the new batch are removed from the queue.
//!
//! The random choice of transactions is made to reduce redundancy even if all validators have
//! roughly the same entries in their queues. By selecting a random fraction of the first _B_
//! entries, any two nodes will likely make almost disjoint contributions instead of proposing
//! the same transaction multiple times.
use std::marker::PhantomData;
use std::{cmp, iter};
use crypto::PublicKey;
use derivative::Derivative;
use failure::Fail;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use dynamic_honey_badger::{self, Batch as DhbBatch, DynamicHoneyBadger, Message};
use transaction_queue::TransactionQueue;
use {util, Contribution, DistAlgorithm, NetworkInfo, NodeIdT};
pub use dynamic_honey_badger::{Change, ChangeState, Input};
/// Queueing honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
    /// Failed to handle input.
    #[fail(display = "Input error: {}", _0)]
    Input(dynamic_honey_badger::Error),
    /// Failed to handle a message.
    // NOTE(review): not constructed in this module — `apply` wraps message
    // errors as `Error::Input`; confirm whether that is intended.
    #[fail(display = "Handle message error: {}", _0)]
    HandleMessage(dynamic_honey_badger::Error),
    /// Failed to propose a contribution.
    #[fail(display = "Propose error: {}", _0)]
    Propose(dynamic_honey_badger::Error),
}
/// The result of `QueueingHoneyBadger` handling an input or message.
pub type Result<T> = ::std::result::Result<T, Error>;
/// A Queueing Honey Badger builder, to configure the parameters and create new instances of
/// `QueueingHoneyBadger`.
pub struct QueueingHoneyBadgerBuilder<T, N: Rand + Ord, Q> {
    /// Shared network data.
    dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
    /// The target number of transactions to be included in each batch.
    batch_size: usize,
    /// The queue of pending transactions that haven't been output in a batch yet.
    queue: Q,
    // Marker tying the builder to the transaction type `T`.
    _phantom: PhantomData<T>,
}
/// A freshly built instance together with its initial `Step` (initial proposal messages).
type QueueingHoneyBadgerWithStep<T, N, Q> = (QueueingHoneyBadger<T, N, Q>, Step<T, N>);
impl<T, N, Q> QueueingHoneyBadgerBuilder<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
// TODO: Make it easier to build a `QueueingHoneyBadger` with a `JoinPlan`. Handle `Step`
// conversion internally.
pub fn new(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> Self {
// TODO: Use the defaults from `HoneyBadgerBuilder`.
QueueingHoneyBadgerBuilder {
dyn_hb,
batch_size: 100,
queue: Default::default(),
_phantom: PhantomData,
}
}
/// Sets the target number of transactions per batch.
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
/// Sets the transaction queue object.
pub fn queue(mut self, queue: Q) -> Self {
self.queue = queue;
self
}
/// Creates a new Queueing Honey Badger instance with an empty buffer.
pub fn | <R>(self, rng: R) -> QueueingHoneyBadgerWithStep<T, N, Q>
where
R:'static + Rng + Send + Sync,
{
self.build_with_transactions(None, rng)
.expect("building without transactions cannot fail")
}
/// Returns a new Queueing Honey Badger instance that starts with the given transactions in its
/// buffer.
pub fn build_with_transactions<TI, R>(
mut self,
txs: TI,
rng: R,
) -> Result<QueueingHoneyBadgerWithStep<T, N, Q>>
where
TI: IntoIterator<Item = T>,
R:'static + Rng + Send + Sync,
{
self.queue.extend(txs);
let mut qhb = QueueingHoneyBadger {
dyn_hb: self.dyn_hb,
batch_size: self.batch_size,
queue: self.queue,
rng: Box::new(rng),
};
let step = qhb.propose()?;
Ok((qhb, step))
}
}
/// A Honey Badger instance that can handle adding and removing nodes and manages a transaction
/// queue.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct QueueingHoneyBadger<T, N: Rand + Ord, Q> {
    /// The target number of transactions to be included in each batch.
    batch_size: usize,
    /// The internal managed `DynamicHoneyBadger` instance.
    dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
    /// The queue of pending transactions that haven't been output in a batch yet.
    queue: Q,
    /// Random number generator used for choosing transactions from the queue.
    // `util::fmt_rng` keeps the RNG's internals out of the Debug output.
    #[derivative(Debug(format_with = "util::fmt_rng"))]
    rng: Box<dyn Rng + Send + Sync>,
}
/// A `QueueingHoneyBadger` step, possibly containing multiple outputs.
pub type Step<T, N> = ::Step<Message<N>, Batch<T, N>, N>;
impl<T, N, Q> DistAlgorithm for QueueingHoneyBadger<T, N, Q>
where
    T: Contribution + Serialize + DeserializeOwned + Clone,
    N: NodeIdT + Serialize + DeserializeOwned + Rand,
    Q: TransactionQueue<T>,
{
    type NodeId = N;
    type Input = Input<T, N>;
    type Output = Batch<T, N>;
    type Message = Message<N>;
    type Error = Error;
    fn handle_input(&mut self, input: Self::Input) -> Result<Step<T, N>> {
        // User transactions are forwarded to `HoneyBadger` right away. Internal messages are
        // in addition signed and broadcast.
        match input {
            Input::User(tx) => self.push_transaction(tx),
            Input::Change(change) => self.vote_for(change),
        }
    }
    // Delegates to the inherent `handle_message` method on the type.
    fn handle_message(&mut self, sender_id: &N, message: Self::Message) -> Result<Step<T, N>> {
        self.handle_message(sender_id, message)
    }
    // Queueing Honey Badger runs indefinitely; it never terminates on its own.
    fn terminated(&self) -> bool {
        false
    }
    fn our_id(&self) -> &N {
        self.dyn_hb.our_id()
    }
}
impl<T, N, Q> QueueingHoneyBadger<T, N, Q>
where
    T: Contribution + Serialize + DeserializeOwned + Clone,
    N: NodeIdT + Serialize + DeserializeOwned + Rand,
    Q: TransactionQueue<T>,
{
    /// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
    /// keys specified by `netinfo`.
    pub fn builder(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> QueueingHoneyBadgerBuilder<T, N, Q> {
        QueueingHoneyBadgerBuilder::new(dyn_hb)
    }
    /// Adds a transaction to the queue.
    ///
    /// This can be called at any time to append to the transaction queue. The new transaction will
    /// be proposed in some future epoch.
    ///
    /// If no proposal has yet been made for the current epoch, this may trigger one. In this case,
    /// a nonempty step will be returned, with the corresponding messages. (Or, if we are the only
    /// validator, even with the completed batch as an output.)
    pub fn push_transaction(&mut self, tx: T) -> Result<Step<T, N>> {
        self.queue.extend(iter::once(tx));
        self.propose()
    }
    /// Casts a vote to change the set of validators.
    ///
    /// This stores a pending vote for the change. It will be included in some future batch, and
    /// once enough validators have been voted for the same change, it will take effect.
    pub fn vote_for(&mut self, change: Change<N>) -> Result<Step<T, N>> {
        self.apply(|dyn_hb| dyn_hb.vote_for(change))
    }
    /// Casts a vote to add a node as a validator.
    ///
    /// This stores a pending vote for the change. It will be included in some future batch, and
    /// once enough validators have been voted for the same change, it will take effect.
    pub fn vote_to_add(&mut self, node_id: N, pub_key: PublicKey) -> Result<Step<T, N>> {
        self.apply(|dyn_hb| dyn_hb.vote_to_add(node_id, pub_key))
    }
    /// Casts a vote to demote a validator to observer.
    ///
    /// This stores a pending vote for the change. It will be included in some future batch, and
    /// once enough validators have been voted for the same change, it will take effect.
    pub fn vote_to_remove(&mut self, node_id: &N) -> Result<Step<T, N>> {
        self.apply(|dyn_hb| dyn_hb.vote_to_remove(node_id))
    }
    /// Handles a message received from `sender_id`.
    ///
    /// This must be called with every message we receive from another node.
    pub fn handle_message(&mut self, sender_id: &N, message: Message<N>) -> Result<Step<T, N>> {
        self.apply(|dyn_hb| dyn_hb.handle_message(sender_id, message))
    }
    /// Returns a reference to the internal managed `DynamicHoneyBadger` instance.
    pub fn dyn_hb(&self) -> &DynamicHoneyBadger<Vec<T>, N> {
        &self.dyn_hb
    }
    /// Returns the information about the node IDs in the network, and the cryptographic keys.
    pub fn netinfo(&self) -> &NetworkInfo<N> {
        self.dyn_hb.netinfo()
    }
    /// Applies a function `f` to the `DynamicHoneyBadger` instance and processes the step.
    fn apply<F>(&mut self, f: F) -> Result<Step<T, N>>
    where
        F: FnOnce(&mut DynamicHoneyBadger<Vec<T>, N>) -> dynamic_honey_badger::Result<Step<T, N>>,
    {
        let step = f(&mut self.dyn_hb).map_err(Error::Input)?;
        // Transactions committed in the output batches leave the queue.
        self.queue
            .remove_multiple(step.output.iter().flat_map(Batch::iter));
        // Chain in a fresh proposal if the new epoch is ready for one.
        Ok(step.join(self.propose()?))
    }
    /// Returns the epoch of the next batch that will be output.
    pub fn next_epoch(&self) -> u64 {
        self.dyn_hb.next_epoch()
    }
    /// Returns `true` if we are ready to propose our contribution for the next epoch, i.e. if the
    /// previous epoch has completed and we have either pending transactions or we are required to
    /// make a proposal to avoid stalling the network.
    fn can_propose(&self) -> bool {
        if self.dyn_hb.has_input() {
            return false; // Previous epoch is still in progress.
        }
        !self.queue.is_empty() || self.dyn_hb.should_propose()
    }
    /// Initiates the next epoch by proposing a batch from the queue.
    fn propose(&mut self) -> Result<Step<T, N>> {
        let mut step = Step::default();
        while self.can_propose() {
            // Propose a random B/N-sized sample of the first B queued entries.
            let amount = cmp::max(1, self.batch_size / self.dyn_hb.netinfo().num_nodes());
            let proposal = self.queue.choose(&mut self.rng, amount, self.batch_size);
            step.extend(
                self.dyn_hb
                    .handle_input(Input::User(proposal))
                    .map_err(Error::Propose)?,
            );
        }
        Ok(step)
    }
}
/// A batch containing a list of transactions from at least two thirds of the validators.
pub type Batch<T, N> = DhbBatch<Vec<T>, N>;
| build | identifier_name |
mod.rs | //! # Queueing Honey Badger
//!
//! This works exactly like Dynamic Honey Badger, but it has a transaction queue built in. Whenever
//! an epoch is output, it will automatically select a list of pending transactions and propose it
//! for the next one. The user can continuously add more pending transactions to the queue.
//!
//! If there are no pending transactions, no validators in the process of being added or
//! removed and not enough other nodes have proposed yet, no automatic proposal will be made: The
//! network then waits until at least _f + 1_ have any content for the next epoch.
//!
//! ## How it works
//!
//! Queueing Honey Badger runs a Dynamic Honey Badger internally, and automatically inputs a list
//! of pending transactions as its contribution at the beginning of each epoch. These are selected
//! by making a random choice of _B / N_ out of the first _B_ entries in the queue, where _B_ is the
//! configurable `batch_size` parameter, and _N_ is the current number of validators.
//!
//! After each output, the transactions that made it into the new batch are removed from the queue.
//!
//! The random choice of transactions is made to reduce redundancy even if all validators have
//! roughly the same entries in their queues. By selecting a random fraction of the first _B_
//! entries, any two nodes will likely make almost disjoint contributions instead of proposing
//! the same transaction multiple times.
use std::marker::PhantomData;
use std::{cmp, iter};
use crypto::PublicKey;
use derivative::Derivative;
use failure::Fail;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use dynamic_honey_badger::{self, Batch as DhbBatch, DynamicHoneyBadger, Message};
use transaction_queue::TransactionQueue;
use {util, Contribution, DistAlgorithm, NetworkInfo, NodeIdT};
pub use dynamic_honey_badger::{Change, ChangeState, Input};
/// Queueing honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
    /// Failed to handle input.
    #[fail(display = "Input error: {}", _0)]
    Input(dynamic_honey_badger::Error),
    /// Failed to handle a message.
    // NOTE(review): not constructed in this module — `apply` wraps message
    // errors as `Error::Input`; confirm whether that is intended.
    #[fail(display = "Handle message error: {}", _0)]
    HandleMessage(dynamic_honey_badger::Error),
    /// Failed to propose a contribution.
    #[fail(display = "Propose error: {}", _0)]
    Propose(dynamic_honey_badger::Error),
}
/// The result of `QueueingHoneyBadger` handling an input or message.
pub type Result<T> = ::std::result::Result<T, Error>;
/// A Queueing Honey Badger builder, to configure the parameters and create new instances of
/// `QueueingHoneyBadger`.
pub struct QueueingHoneyBadgerBuilder<T, N: Rand + Ord, Q> {
    /// Shared network data.
    dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
    /// The target number of transactions to be included in each batch.
    batch_size: usize,
    /// The queue of pending transactions that haven't been output in a batch yet.
    queue: Q,
    // Marker tying the builder to the transaction type `T`.
    _phantom: PhantomData<T>,
}
/// A freshly built instance together with its initial `Step` (initial proposal messages).
type QueueingHoneyBadgerWithStep<T, N, Q> = (QueueingHoneyBadger<T, N, Q>, Step<T, N>);
impl<T, N, Q> QueueingHoneyBadgerBuilder<T, N, Q>
where
    T: Contribution + Serialize + DeserializeOwned + Clone,
    N: NodeIdT + Serialize + DeserializeOwned + Rand,
    Q: TransactionQueue<T>,
{
    /// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
    /// keys specified by `netinfo`.
    // TODO: Make it easier to build a `QueueingHoneyBadger` with a `JoinPlan`. Handle `Step`
    // conversion internally.
    pub fn new(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> Self {
        // TODO: Use the defaults from `HoneyBadgerBuilder`.
        QueueingHoneyBadgerBuilder {
            dyn_hb,
            batch_size: 100,
            queue: Default::default(),
            _phantom: PhantomData,
        }
    }
    /// Sets the target number of transactions per batch.
    pub fn batch_size(mut self, batch_size: usize) -> Self {
        self.batch_size = batch_size;
        self
    }
    /// Sets the transaction queue object.
    pub fn queue(mut self, queue: Q) -> Self {
        self.queue = queue;
        self
    }
    /// Creates a new Queueing Honey Badger instance with an empty buffer.
    pub fn build<R>(self, rng: R) -> QueueingHoneyBadgerWithStep<T, N, Q>
    where
        R:'static + Rng + Send + Sync,
    {
        // With no initial transactions the fallible build path cannot fail.
        self.build_with_transactions(None, rng)
            .expect("building without transactions cannot fail")
    }
    /// Returns a new Queueing Honey Badger instance that starts with the given transactions in its
    /// buffer.
    pub fn build_with_transactions<TI, R>(
        mut self,
        txs: TI,
        rng: R,
    ) -> Result<QueueingHoneyBadgerWithStep<T, N, Q>>
    where
        TI: IntoIterator<Item = T>,
        R:'static + Rng + Send + Sync,
    {
        self.queue.extend(txs);
        let mut qhb = QueueingHoneyBadger {
            dyn_hb: self.dyn_hb,
            batch_size: self.batch_size,
            queue: self.queue,
            rng: Box::new(rng),
        };
        // Propose right away so the returned step carries the initial messages.
        let step = qhb.propose()?;
        Ok((qhb, step))
    }
}
/// A Honey Badger instance that can handle adding and removing nodes and manages a transaction
/// queue.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct QueueingHoneyBadger<T, N: Rand + Ord, Q> {
    /// The target number of transactions to be included in each batch.
    batch_size: usize,
    /// The internal managed `DynamicHoneyBadger` instance.
    dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
    /// The queue of pending transactions that haven't been output in a batch yet.
    queue: Q,
    /// Random number generator used for choosing transactions from the queue.
    // `util::fmt_rng` keeps the RNG's internals out of the Debug output.
    #[derivative(Debug(format_with = "util::fmt_rng"))]
    rng: Box<dyn Rng + Send + Sync>,
}
/// A `QueueingHoneyBadger` step, possibly containing multiple outputs.
pub type Step<T, N> = ::Step<Message<N>, Batch<T, N>, N>;
impl<T, N, Q> DistAlgorithm for QueueingHoneyBadger<T, N, Q>
where
    T: Contribution + Serialize + DeserializeOwned + Clone,
    N: NodeIdT + Serialize + DeserializeOwned + Rand,
    Q: TransactionQueue<T>,
{
    type NodeId = N;
    type Input = Input<T, N>;
    type Output = Batch<T, N>;
    type Message = Message<N>;
    type Error = Error;
    fn handle_input(&mut self, input: Self::Input) -> Result<Step<T, N>> {
        // User transactions are forwarded to `HoneyBadger` right away. Internal messages are
        // in addition signed and broadcast.
        match input {
            Input::User(tx) => self.push_transaction(tx),
            Input::Change(change) => self.vote_for(change),
        }
    }
    // Delegates to the inherent `handle_message` method on the type.
    fn handle_message(&mut self, sender_id: &N, message: Self::Message) -> Result<Step<T, N>> {
        self.handle_message(sender_id, message)
    }
    // Queueing Honey Badger runs indefinitely; it never terminates on its own.
    fn terminated(&self) -> bool {
        false
    }
    fn our_id(&self) -> &N {
        self.dyn_hb.our_id()
    }
}
impl<T, N, Q> QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
pub fn builder(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> QueueingHoneyBadgerBuilder<T, N, Q> {
QueueingHoneyBadgerBuilder::new(dyn_hb)
}
/// Adds a transaction to the queue.
///
/// This can be called at any time to append to the transaction queue. The new transaction will
/// be proposed in some future epoch.
///
/// If no proposal has yet been made for the current epoch, this may trigger one. In this case,
/// a nonempty step will returned, with the corresponding messages. (Or, if we are the only
/// validator, even with the completed batch as an output.)
pub fn push_transaction(&mut self, tx: T) -> Result<Step<T, N>> {
self.queue.extend(iter::once(tx));
self.propose()
}
/// Casts a vote to change the set of validators.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_for(&mut self, change: Change<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_for(change))
}
/// Casts a vote to add a node as a validator.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_add(&mut self, node_id: N, pub_key: PublicKey) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_add(node_id, pub_key))
}
/// Casts a vote to demote a validator to observer.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_remove(&mut self, node_id: &N) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_remove(node_id))
}
/// Handles a message received from `sender_id`.
///
/// This must be called with every message we receive from another node.
pub fn handle_message(&mut self, sender_id: &N, message: Message<N>) -> Result<Step<T, N>> |
/// Returns a reference to the internal managed `DynamicHoneyBadger` instance.
pub fn dyn_hb(&self) -> &DynamicHoneyBadger<Vec<T>, N> {
&self.dyn_hb
}
/// Returns the information about the node IDs in the network, and the cryptographic keys.
pub fn netinfo(&self) -> &NetworkInfo<N> {
self.dyn_hb.netinfo()
}
/// Applies a function `f` to the `DynamicHoneyBadger` instance and processes the step.
fn apply<F>(&mut self, f: F) -> Result<Step<T, N>>
where
F: FnOnce(&mut DynamicHoneyBadger<Vec<T>, N>) -> dynamic_honey_badger::Result<Step<T, N>>,
{
let step = f(&mut self.dyn_hb).map_err(Error::Input)?;
self.queue
.remove_multiple(step.output.iter().flat_map(Batch::iter));
Ok(step.join(self.propose()?))
}
/// Returns the epoch of the next batch that will be output.
pub fn next_epoch(&self) -> u64 {
self.dyn_hb.next_epoch()
}
/// Returns `true` if we are ready to propose our contribution for the next epoch, i.e. if the
/// previous epoch has completed and we have either pending transactions or we are required to
/// make a proposal to avoid stalling the network.
fn can_propose(&self) -> bool {
if self.dyn_hb.has_input() {
return false; // Previous epoch is still in progress.
}
!self.queue.is_empty() || self.dyn_hb.should_propose()
}
/// Initiates the next epoch by proposing a batch from the queue.
fn propose(&mut self) -> Result<Step<T, N>> {
let mut step = Step::default();
while self.can_propose() {
let amount = cmp::max(1, self.batch_size / self.dyn_hb.netinfo().num_nodes());
let proposal = self.queue.choose(&mut self.rng, amount, self.batch_size);
step.extend(
self.dyn_hb
.handle_input(Input::User(proposal))
.map_err(Error::Propose)?,
);
}
Ok(step)
}
}
/// A batch containing a list of transactions from at least two thirds of the validators.
pub type Batch<T, N> = DhbBatch<Vec<T>, N>;
| {
self.apply(|dyn_hb| dyn_hb.handle_message(sender_id, message))
} | identifier_body |
mod.rs | //! # Queueing Honey Badger
//!
//! This works exactly like Dynamic Honey Badger, but it has a transaction queue built in. Whenever
//! an epoch is output, it will automatically select a list of pending transactions and propose it
//! for the next one. The user can continuously add more pending transactions to the queue.
//!
//! If there are no pending transactions, no validators in the process of being added or
//! removed and not enough other nodes have proposed yet, no automatic proposal will be made: The
//! network then waits until at least _f + 1_ have any content for the next epoch.
//!
//! ## How it works
//!
//! Queueing Honey Badger runs a Dynamic Honey Badger internally, and automatically inputs a list
//! of pending transactions as its contribution at the beginning of each epoch. These are selected
//! by making a random choice of _B / N_ out of the first _B_ entries in the queue, where _B_ is the
//! configurable `batch_size` parameter, and _N_ is the current number of validators.
//!
//! After each output, the transactions that made it into the new batch are removed from the queue.
//!
//! The random choice of transactions is made to reduce redundancy even if all validators have
//! roughly the same entries in their queues. By selecting a random fraction of the first _B_
//! entries, any two nodes will likely make almost disjoint contributions instead of proposing
//! the same transaction multiple times.
use std::marker::PhantomData;
use std::{cmp, iter};
use crypto::PublicKey;
use derivative::Derivative;
use failure::Fail;
use rand::{Rand, Rng};
use serde::{de::DeserializeOwned, Serialize};
use dynamic_honey_badger::{self, Batch as DhbBatch, DynamicHoneyBadger, Message};
use transaction_queue::TransactionQueue;
use {util, Contribution, DistAlgorithm, NetworkInfo, NodeIdT};
pub use dynamic_honey_badger::{Change, ChangeState, Input};
/// Queueing honey badger error variants.
#[derive(Debug, Fail)]
pub enum Error {
    /// Failed to handle input.
    #[fail(display = "Input error: {}", _0)]
    Input(dynamic_honey_badger::Error),
    /// Failed to handle a message.
    // NOTE(review): not constructed in this module — `apply` wraps message
    // errors as `Error::Input`; confirm whether that is intended.
    #[fail(display = "Handle message error: {}", _0)]
    HandleMessage(dynamic_honey_badger::Error),
    /// Failed to propose a contribution.
    #[fail(display = "Propose error: {}", _0)]
    Propose(dynamic_honey_badger::Error),
}
/// The result of `QueueingHoneyBadger` handling an input or message.
pub type Result<T> = ::std::result::Result<T, Error>;
/// A Queueing Honey Badger builder, to configure the parameters and create new instances of
/// `QueueingHoneyBadger`.
pub struct QueueingHoneyBadgerBuilder<T, N: Rand + Ord, Q> {
    /// Shared network data.
    dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
    /// The target number of transactions to be included in each batch.
    batch_size: usize,
    /// The queue of pending transactions that haven't been output in a batch yet.
    queue: Q,
    // Marker tying the builder to the transaction type `T`.
    _phantom: PhantomData<T>,
}
/// A freshly built instance together with its initial `Step` (initial proposal messages).
type QueueingHoneyBadgerWithStep<T, N, Q> = (QueueingHoneyBadger<T, N, Q>, Step<T, N>);
impl<T, N, Q> QueueingHoneyBadgerBuilder<T, N, Q>
where
    T: Contribution + Serialize + DeserializeOwned + Clone,
    N: NodeIdT + Serialize + DeserializeOwned + Rand,
    Q: TransactionQueue<T>,
{
    /// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
    /// keys specified by `netinfo`.
    // TODO: Make it easier to build a `QueueingHoneyBadger` with a `JoinPlan`. Handle `Step`
    // conversion internally.
    pub fn new(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> Self {
        // TODO: Use the defaults from `HoneyBadgerBuilder`.
        QueueingHoneyBadgerBuilder {
            dyn_hb,
            batch_size: 100,
            queue: Default::default(),
            _phantom: PhantomData,
        }
    }
    /// Sets the target number of transactions per batch.
    pub fn batch_size(mut self, batch_size: usize) -> Self {
        self.batch_size = batch_size;
        self
    }
    /// Sets the transaction queue object.
    pub fn queue(mut self, queue: Q) -> Self {
        self.queue = queue;
        self
    }
    /// Creates a new Queueing Honey Badger instance with an empty buffer.
    pub fn build<R>(self, rng: R) -> QueueingHoneyBadgerWithStep<T, N, Q>
    where
        R:'static + Rng + Send + Sync,
    {
        // With no initial transactions the fallible build path cannot fail.
        self.build_with_transactions(None, rng)
            .expect("building without transactions cannot fail")
    }
    /// Returns a new Queueing Honey Badger instance that starts with the given transactions in its
    /// buffer.
    pub fn build_with_transactions<TI, R>(
        mut self,
        txs: TI,
        rng: R,
    ) -> Result<QueueingHoneyBadgerWithStep<T, N, Q>>
    where
        TI: IntoIterator<Item = T>,
        R:'static + Rng + Send + Sync,
    {
        self.queue.extend(txs);
        let mut qhb = QueueingHoneyBadger {
            dyn_hb: self.dyn_hb,
            batch_size: self.batch_size,
            queue: self.queue,
            rng: Box::new(rng),
        };
        // Propose right away so the returned step carries the initial messages.
        let step = qhb.propose()?;
        Ok((qhb, step))
    }
}
/// A Honey Badger instance that can handle adding and removing nodes and manages a transaction
/// queue.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct QueueingHoneyBadger<T, N: Rand + Ord, Q> {
/// The target number of transactions to be included in each batch.
batch_size: usize,
/// The internal managed `DynamicHoneyBadger` instance.
dyn_hb: DynamicHoneyBadger<Vec<T>, N>,
/// The queue of pending transactions that haven't been output in a batch yet.
queue: Q,
/// Random number generator used for choosing transactions from the queue.
#[derivative(Debug(format_with = "util::fmt_rng"))]
rng: Box<dyn Rng + Send + Sync>,
}
/// A `QueueingHoneyBadger` step, possibly containing multiple outputs.
pub type Step<T, N> = ::Step<Message<N>, Batch<T, N>, N>;
impl<T, N, Q> DistAlgorithm for QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
type NodeId = N;
type Input = Input<T, N>;
type Output = Batch<T, N>;
type Message = Message<N>;
type Error = Error;
fn handle_input(&mut self, input: Self::Input) -> Result<Step<T, N>> {
// User transactions are forwarded to `HoneyBadger` right away. Internal messages are
// in addition signed and broadcast.
match input {
Input::User(tx) => self.push_transaction(tx),
Input::Change(change) => self.vote_for(change),
}
}
fn handle_message(&mut self, sender_id: &N, message: Self::Message) -> Result<Step<T, N>> {
self.handle_message(sender_id, message)
}
fn terminated(&self) -> bool {
false
}
| self.dyn_hb.our_id()
}
}
impl<T, N, Q> QueueingHoneyBadger<T, N, Q>
where
T: Contribution + Serialize + DeserializeOwned + Clone,
N: NodeIdT + Serialize + DeserializeOwned + Rand,
Q: TransactionQueue<T>,
{
/// Returns a new `QueueingHoneyBadgerBuilder` configured to use the node IDs and cryptographic
/// keys specified by `netinfo`.
pub fn builder(dyn_hb: DynamicHoneyBadger<Vec<T>, N>) -> QueueingHoneyBadgerBuilder<T, N, Q> {
QueueingHoneyBadgerBuilder::new(dyn_hb)
}
/// Adds a transaction to the queue.
///
/// This can be called at any time to append to the transaction queue. The new transaction will
/// be proposed in some future epoch.
///
/// If no proposal has yet been made for the current epoch, this may trigger one. In this case,
/// a nonempty step will returned, with the corresponding messages. (Or, if we are the only
/// validator, even with the completed batch as an output.)
pub fn push_transaction(&mut self, tx: T) -> Result<Step<T, N>> {
self.queue.extend(iter::once(tx));
self.propose()
}
/// Casts a vote to change the set of validators.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_for(&mut self, change: Change<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_for(change))
}
/// Casts a vote to add a node as a validator.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_add(&mut self, node_id: N, pub_key: PublicKey) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_add(node_id, pub_key))
}
/// Casts a vote to demote a validator to observer.
///
/// This stores a pending vote for the change. It will be included in some future batch, and
/// once enough validators have been voted for the same change, it will take effect.
pub fn vote_to_remove(&mut self, node_id: &N) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.vote_to_remove(node_id))
}
/// Handles a message received from `sender_id`.
///
/// This must be called with every message we receive from another node.
pub fn handle_message(&mut self, sender_id: &N, message: Message<N>) -> Result<Step<T, N>> {
self.apply(|dyn_hb| dyn_hb.handle_message(sender_id, message))
}
/// Returns a reference to the internal managed `DynamicHoneyBadger` instance.
pub fn dyn_hb(&self) -> &DynamicHoneyBadger<Vec<T>, N> {
&self.dyn_hb
}
/// Returns the information about the node IDs in the network, and the cryptographic keys.
pub fn netinfo(&self) -> &NetworkInfo<N> {
self.dyn_hb.netinfo()
}
/// Applies a function `f` to the `DynamicHoneyBadger` instance and processes the step.
fn apply<F>(&mut self, f: F) -> Result<Step<T, N>>
where
F: FnOnce(&mut DynamicHoneyBadger<Vec<T>, N>) -> dynamic_honey_badger::Result<Step<T, N>>,
{
let step = f(&mut self.dyn_hb).map_err(Error::Input)?;
self.queue
.remove_multiple(step.output.iter().flat_map(Batch::iter));
Ok(step.join(self.propose()?))
}
/// Returns the epoch of the next batch that will be output.
pub fn next_epoch(&self) -> u64 {
self.dyn_hb.next_epoch()
}
/// Returns `true` if we are ready to propose our contribution for the next epoch, i.e. if the
/// previous epoch has completed and we have either pending transactions or we are required to
/// make a proposal to avoid stalling the network.
fn can_propose(&self) -> bool {
if self.dyn_hb.has_input() {
return false; // Previous epoch is still in progress.
}
!self.queue.is_empty() || self.dyn_hb.should_propose()
}
/// Initiates the next epoch by proposing a batch from the queue.
fn propose(&mut self) -> Result<Step<T, N>> {
let mut step = Step::default();
while self.can_propose() {
let amount = cmp::max(1, self.batch_size / self.dyn_hb.netinfo().num_nodes());
let proposal = self.queue.choose(&mut self.rng, amount, self.batch_size);
step.extend(
self.dyn_hb
.handle_input(Input::User(proposal))
.map_err(Error::Propose)?,
);
}
Ok(step)
}
}
/// A batch containing a list of transactions from at least two thirds of the validators.
pub type Batch<T, N> = DhbBatch<Vec<T>, N>; | fn our_id(&self) -> &N { | random_line_split |
lib.rs | Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if... then... else...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... ->...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition
//! in case these segments were found. For example, let's consider two macros: `if... then...`,
//! and `if... then... else...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//!
//! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if... then... else...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated),.. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated),.. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented),.. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() &&!is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
}
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn | (mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast,.. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock {.. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 {
str.push_str("* ");
}
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
| parse_argument_definition | identifier_name |
lib.rs | in Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if... then... else...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... ->...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition | //! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if... then... else...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated),.. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated),.. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented),.. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() &&!is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
}
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast,.. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock {.. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 {
str.push_str("* ");
}
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
| //! in case these segments were found. For example, let's consider two macros: `if ... then ...`,
//! and `if ... then ... else ...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//! | random_line_split |
lib.rs | Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if... then... else...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... ->...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition
//! in case these segments were found. For example, let's consider two macros: `if... then...`,
//! and `if... then... else...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//!
//! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if... then... else...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
/// Constructor.
pub fn new() -> Self {
let macros = macros::built_in::all();
Self { macros }
}
/// Main entry point.
pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
let tokens = lexer::run(code);
let resolver = macros::resolver::Resolver::new_statement();
let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
let value = result.value;
if let Some(error) = result.internal_error {
return value.with_error(format!("Internal error: {error}"));
}
value
}
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
let mut left_offset = source::span::Offset::default();
if let Tree { variant: box Variant::Annotated(annotated),.. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::AnnotatedBuiltin(annotated),.. } = &mut tree {
annotated.expression = annotated.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::Documented(documented),.. } = &mut tree {
documented.expression = documented.expression.take().map(expression_to_statement);
return tree;
}
if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
let colon = annotated.operator;
let type_ = annotated.type_;
let variable = annotated.expression;
let mut tree = Tree::type_signature(variable, colon, type_);
tree.span.left_offset += span.left_offset;
return tree;
}
let tree_ = &mut tree;
let opr_app = match tree_ {
Tree { variant: box Variant::OprApp(opr_app), span } => {
left_offset += &span.left_offset;
opr_app
}
_ => return tree,
};
if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
let (leftmost, args) = collect_arguments(lhs.clone());
if let Some(rhs) = rhs {
if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
// If the LHS is a type, this is a (destructuring) assignment.
let lhs = expression_to_pattern(mem::take(lhs));
let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
if args.is_empty() &&!is_body_block(rhs) {
// If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
// this is a variable assignment.
let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
if is_qualified_name(&leftmost) {
// If this is not a variable assignment, and the leftmost leaf of the `App` tree is
// a qualified name, this is a function definition.
let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
result.span.left_offset += left_offset;
return result;
}
}
tree
}
fn is_qualified_name(tree: &syntax::Tree) -> bool {
use syntax::tree::*;
match &*tree.variant {
Variant::Ident(_) => true,
Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
if matches!(&*rhs.variant, Variant::Ident(_)) && opr.properties.is_dot() =>
is_qualified_name(lhs),
_ => false,
}
}
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_type(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_type(body)), close),
box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_type(func), expression_to_type(arg)),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
use syntax::tree::*;
if let Variant::Wildcard(wildcard) = &mut *input.variant {
wildcard.de_bruijn_index = None;
return input;
}
let mut out = match input.variant {
box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_pattern(ast),
box Variant::Group(Group { open, body: Some(body), close }) =>
Tree::group(open, Some(expression_to_pattern(body)), close),
box Variant::App(App { func, arg }) =>
Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
Tree::type_annotated(expression_to_pattern(expression), operator, type_),
_ => return input,
};
out.span.left_offset += input.span.left_offset;
out
}
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
let mut args = vec![];
let tree = unroll_arguments(tree, &mut args);
args.reverse();
(tree, args)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
}
fn unroll_arguments<'s>(
mut tree: syntax::Tree<'s>,
args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
while let Some(arg) = parse_argument_application(&mut tree) {
args.push(arg);
}
tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
use syntax::tree::*;
match &mut expression.variant {
box Variant::App(App { func, arg }) => {
let arg = parse_argument_definition(arg.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(arg)
}
box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
let open = mem::take(open);
let close = mem::take(close);
let equals = equals.clone();
let pattern = Tree::ident(name.clone());
let open2 = default();
let suspension = default();
let close2 = default();
let type_ = default();
let default = Some(ArgumentDefault { equals, expression: arg.clone() });
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open,
open2,
pattern,
suspension,
default,
close2,
type_,
close,
})
}
box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
let pattern = Tree::ident(default_.clone());
func.span.left_offset += mem::take(&mut expression.span.left_offset);
*expression = func.clone();
Some(ArgumentDefinition {
open: default(),
open2: default(),
suspension: default(),
pattern,
type_: default(),
close2: default(),
default: default(),
close: default(),
})
}
_ => None,
}
}
/// Interpret the expression as an element of an argument definition sequence.
pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
use syntax::tree::*;
let mut open1 = default();
let mut close1 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open1 = open;
close1 = close;
pattern = body;
}
let mut default_ = default();
if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
let left_offset = pattern.span.left_offset;
default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
pattern = lhs.clone();
pattern.span.left_offset += left_offset;
}
let mut open2 = default();
let mut close2 = default();
if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
*(if let Some(open) = open.as_mut() {
&mut open.left_offset
} else {
&mut body.span.left_offset
}) += pattern.span.left_offset;
open2 = open;
close2 = close;
pattern = body;
}
let mut type__ = default();
if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
pattern.variant
{
expression.span.left_offset += pattern.span.left_offset;
type__ = Some(ArgumentType { operator, type_ });
pattern = expression;
}
let mut suspension = default();
if let box Variant::TemplateFunction(TemplateFunction { mut ast,.. }) = pattern.variant {
ast.span.left_offset += pattern.span.left_offset;
pattern = ast;
}
if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
let mut opr = opr.clone();
opr.left_offset += pattern.span.left_offset;
suspension = Some(opr);
pattern = rhs.clone();
}
let pattern = expression_to_pattern(pattern);
let open = open1;
let close = close1;
let type_ = type__;
ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
matches!(&*expression.variant, syntax::tree::Variant::BodyBlock {.. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
#[bench]
fn bench_parsing_type_defs(bencher: &mut Bencher) {
let reps = 1_000;
let str = "type Option a b c\n".repeat(reps);
let parser = Parser::new();
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_blocks(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 10_000;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let mut indent = 0u32;
for _ in 0..lines {
// Indent:
// 1/8 chance of increasing.
// 1/8 chance of decreasing.
// 3/4 chance of leaving unchanged.
match rng.gen_range(0..8) {
0u32 => indent = indent.saturating_sub(1),
1 => indent += 1,
_ => (),
}
for _ in 0..indent {
str.push(' ');
}
// 1/4 chance of operator-block line syntax.
if rng.gen_range(0..4) == 0u32 |
str.push('x');
// Equal chance of the next line being interpreted as a body block or argument block
// line, if it is indented and doesn't match the operator-block syntax.
// The `=` operator is chosen to exercise the expression-to-statement conversion path.
if rng.gen() {
str.push_str(" =");
}
str.push('\n');
}
let parser = Parser::new();
bencher.bytes = str.len() as u64;
bencher.iter(move || {
parser.run(&str);
});
}
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
| {
str.push_str("* ");
} | conditional_block |
lib.rs | Enso are a very powerful mechanism and are used to transform group of tokens into
//! almost any statement. First, macros need to be discovered and registered. Currently, there is no
//! real macro discovery process, as there is no support for user-defined macros. Instead, there is
//! a set of hardcoded macros defined in the compiler.
//!
//! Each macro defines one or more segments. Every segment starts with a predefined token and can
//! contain any number of other tokens. For example, the macro `if... then... else...` contains
//! three segments. Macros can also accept prefix tokens, a set of tokens on the left of the first
//! segment. A good example is the lambda macro `... ->...`.
//!
//! In this step, a [`MacroMatchTree`] is built. Basically, it is a map from the possible next
//! segment name to information of what other segments are required and what is the macro definition
//! in case these segments were found. For example, let's consider two macros: `if... then...`,
//! and `if... then... else...`. In such a case, the macro registry will contain only one entry,
//! "if", and two sets of possible resolution paths: ["then"], and ["then", "else"], each associated
//! with the corresponding macro definition.
//!
//! # Splitting the token stream by the macro segments.
//! The input token stream is being iterated and is being split based on the segments of the
//! registered macros. For example, for the input `if a b then c d else e f`, the token stream will
//! be split into three segments, `a b`, `c d`, and `e f`, which will be associated with the
//! `if... then... else...` macro definition.
//!
//! The splitting process is hierarchical. It means that a new macro can start being resolved during
//! resolution of a parent macro. For example, `if if a then b then c else d` is a correct
//! expression. After finding the first `if` token, the token stream will be split. The next `if`
//! token starts a new token stream splitting. The first `then` token belongs to the nested macro,
//! however, as soon as the resolver sees the second `then` token, it will consider the nested macro
//! to be finished, and will come back to parent macro resolution.
//!
//! # Resolving right-hand-side patterns of macro segments.
//! In the next steps, each macro is being analyzed, started from the most nested ones. For each
//! macro, the [`Pattern`] of last segment is being run to check which tokens belong to that macro,
//! and which tokens should be transferred to parent macro definition. For example, consider the
//! following code `process (read file) content-> print content`. The `(...)` is a macro with two
//! sections `(` and `)`. Let's mark the token splitting with `[` and `]` characters. The previous
//! macro resolution steps would output such split of the token stream:
//! `process [(read file][) content[-> print content]]`. In this step, the most inner macro will be
//! analyzed first. The pattern of the last segment of the inner macro (`->`) defines that it
//! consumes all tokens, so all the tokens `print content` are left as they are. Now, the resolution
//! moves to the parent macro. Its last segment starts with the `)` token, which pattern defines
//! that it does not consume any tokens, so all of its current tokens (`content[-> print content]]`)
//! are popped to a parent definition, forming `process [(read file][)] content[-> print content]`.
//!
//! Please note, that root of the expression is considered a special macro as well. It is done for
//! the algorithm unification purposes.
//!
//! # Resolving left-hand-side patterns of macro segments.
//! In this step, each macro is being analyzed, started from the most nested ones. For each macro,
//! the [`Pattern`] of the macro prefix is being run to check which tokens belong to the prefix of
//! the macro (in case the macro defines the prefix). In the example above, the macro `->` defines
//! complex prefix rules: if the token on the left of the arrow used no space, then only a single
//! token will be consumed. As a result of this step, the following token split will occur:
//! `[process [(read file][)] [content-> print content]`, which is exactly what we wanted.
//!
//! # Resolving patterns of macro segments.
//! In this step, all macro segment patterns are being resolved and errors are reported in case it
//! was not possible. If tokens in a segment match the segment pattern, they are sent to the
//! operator precedence resolver for final transformation.
//!
//! # Operator precedence resolution.
//! Each token stream sent to the operator resolver is processed by a modified Shunting Yard
//! algorithm, which handles such situations as multiple operators placed next to each other,
//! multiple identifiers placed next to each other, and also takes spacing into consideration in
//! order to implement spacing-aware precedence rules. After all segments are resolved, the macro
//! is being treated as a single token in one of the segments of the parent macro, and is being
//! processed by the operator precedence resolver as well. In the end, a single [`syntax::Tree`] is
//! produced, containing the parsed expression.
#![recursion_limit = "256"]
// === Features ===
#![allow(incomplete_features)]
#![feature(let_chains)]
#![feature(allocator_api)]
#![feature(exact_size_is_empty)]
#![feature(test)]
#![feature(specialization)]
#![feature(if_let_guard)]
#![feature(box_patterns)]
#![feature(option_get_or_insert_default)]
// === Standard Linter Configuration ===
#![deny(non_ascii_idents)]
#![warn(unsafe_code)]
#![allow(clippy::bool_to_int_with_if)]
#![allow(clippy::let_and_return)]
// === Non-Standard Linter Configuration ===
#![allow(clippy::option_map_unit_fn)]
#![allow(clippy::precedence)]
#![allow(dead_code)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
use crate::prelude::*;
// ==============
// === Export ===
// ==============
pub mod lexer;
pub mod macros;
pub mod metadata;
pub mod serialization;
pub mod source;
pub mod syntax;
/// Popular utilities, imported by most modules of this crate.
pub mod prelude {
pub use enso_prelude::serde_reexports::*;
pub use enso_prelude::*;
pub use enso_reflect as reflect;
pub use enso_reflect::Reflect;
pub use enso_types::traits::*;
pub use enso_types::unit2::Bytes;
/// Wraps return value for functions whose implementations don't handle all cases yet. When the
/// parser is complete, this type will be eliminated.
pub type WipResult<T> = Result<T, String>;
/// Return type for functions that will only fail in case of a bug in the implementation.
#[derive(Debug, Default)]
pub struct ParseResult<T> {
/// The result of the operation. If `internal_error` is set, this is a best-effort value
/// that cannot be assumed to be accurate; otherwise, it should be correct.
pub value: T,
/// Internal error encountered while computing this result.
pub internal_error: Option<String>,
}
impl<T> ParseResult<T> {
/// Return a new [`ParseResult`] whose value is the result of applying the given function to
/// the input's value, and whose `internal_error` field is the same as the input.
pub fn map<U, F>(self, f: F) -> ParseResult<U>
where F: FnOnce(T) -> U {
let ParseResult { value, internal_error } = self;
let value = f(value);
ParseResult { value, internal_error }
}
/// Panic if the result contains an internal error; otherwise, return the contained value.
pub fn unwrap(self) -> T {
assert_eq!(self.internal_error, None);
self.value
}
}
}
// ==============
// === Parser ===
// ==============
/// Enso parser. See the module documentation to learn more about how it works.
#[allow(missing_docs)]
#[derive(Debug)]
pub struct Parser {
pub macros: macros::resolver::MacroMap,
}
impl Parser {
    /// Constructor.
    pub fn new() -> Self {
        let macros = macros::built_in::all();
        Self { macros }
    }
    /// Main entry point: lex `code`, then run macro resolution over the token
    /// stream to produce a syntax tree. An internal (implementation-bug) error
    /// is attached to the returned tree instead of panicking.
    pub fn run<'s>(&self, code: &'s str) -> syntax::Tree<'s> {
        let tokens = lexer::run(code);
        let resolver = macros::resolver::Resolver::new_statement();
        // Lexing and resolution both flow through ParseResult; `map` transforms
        // the value while carrying any `internal_error` along.
        let result = tokens.map(|tokens| resolver.run(&self.macros, tokens));
        let value = result.value;
        if let Some(error) = result.internal_error {
            return value.with_error(format!("Internal error: {error}"));
        }
        value
    }
}
impl Default for Parser {
fn default() -> Self {
Self::new()
}
}
// == Parsing helpers ==
/// Reinterpret an expression in a statement context (i.e. as a top level member of a block).
///
/// In statement context, an expression that has an assignment operator at its top level is
/// interpreted as a variable assignment or method definition.
fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
    use syntax::tree::*;
    // Offset peeled off an unwrapped OprApp below; re-applied to whatever tree
    // is built in its place so source spans stay accurate.
    let mut left_offset = source::span::Offset::default();
    // Annotations and documentation wrap a statement: convert the wrapped
    // expression in place and return the wrapper unchanged.
    if let Tree { variant: box Variant::Annotated(annotated),.. } = &mut tree {
        annotated.expression = annotated.expression.take().map(expression_to_statement);
        return tree;
    }
    if let Tree { variant: box Variant::AnnotatedBuiltin(annotated),.. } = &mut tree {
        annotated.expression = annotated.expression.take().map(expression_to_statement);
        return tree;
    }
    if let Tree { variant: box Variant::Documented(documented),.. } = &mut tree {
        documented.expression = documented.expression.take().map(expression_to_statement);
        return tree;
    }
    // At statement level a type annotation (`expr : Type`) is a standalone
    // type signature rather than an inline annotation.
    if let Tree { variant: box Variant::TypeAnnotated(annotated), span } = tree {
        let colon = annotated.operator;
        let type_ = annotated.type_;
        let variable = annotated.expression;
        let mut tree = Tree::type_signature(variable, colon, type_);
        tree.span.left_offset += span.left_offset;
        return tree;
    }
    // Only an operator application can become an assignment or a function
    // definition; anything else is already a valid statement.
    let tree_ = &mut tree;
    let opr_app = match tree_ {
        Tree { variant: box Variant::OprApp(opr_app), span } => {
            left_offset += &span.left_offset;
            opr_app
        }
        _ => return tree,
    };
    if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
        // Split the LHS into its leftmost leaf and the arguments applied to it.
        let (leftmost, args) = collect_arguments(lhs.clone());
        if let Some(rhs) = rhs {
            if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
                // If the LHS is a type, this is a (destructuring) assignment.
                let lhs = expression_to_pattern(mem::take(lhs));
                let mut result = Tree::assignment(lhs, mem::take(opr), mem::take(rhs));
                result.span.left_offset += left_offset;
                return result;
            }
            if args.is_empty() &&!is_body_block(rhs) {
                // If the LHS has no arguments, and there is a RHS, and the RHS is not a body block,
                // this is a variable assignment.
                let mut result = Tree::assignment(leftmost, mem::take(opr), mem::take(rhs));
                result.span.left_offset += left_offset;
                return result;
            }
        }
        if is_qualified_name(&leftmost) {
            // If this is not a variable assignment, and the leftmost leaf of the `App` tree is
            // a qualified name, this is a function definition.
            let mut result = Tree::function(leftmost, args, mem::take(opr), mem::take(rhs));
            result.span.left_offset += left_offset;
            return result;
        }
    }
    tree
}
/// Return whether the tree is a (possibly dotted) qualified name: an
/// identifier, or a chain `a.b.c` where every `.`-application's RHS is an
/// identifier.
fn is_qualified_name(tree: &syntax::Tree) -> bool {
    use syntax::tree::*;
    // Walk down the left spine iteratively instead of recursing.
    let mut current = tree;
    loop {
        match &*current.variant {
            Variant::Ident(_) => return true,
            Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) })
                if opr.properties.is_dot() && matches!(&*rhs.variant, Variant::Ident(_)) =>
                current = lhs,
            _ => return false,
        }
    }
}
/// Reinterpret an expression in a type context: strip wildcard de Bruijn
/// indices and rebuild template functions, groups, operator applications and
/// applications with their children converted recursively. Other variants
/// pass through unchanged.
fn expression_to_type(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
    use syntax::tree::*;
    // In type position a `_` wildcard carries no de Bruijn index (presumably
    // it would otherwise be treated as a lambda argument — confirm).
    if let Variant::Wildcard(wildcard) = &mut *input.variant {
        wildcard.de_bruijn_index = None;
        return input;
    }
    let mut out = match input.variant {
        box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_type(ast),
        box Variant::Group(Group { open, body: Some(body), close }) =>
            Tree::group(open, Some(expression_to_type(body)), close),
        box Variant::OprApp(OprApp { lhs, opr, rhs }) =>
            Tree::opr_app(lhs.map(expression_to_type), opr, rhs.map(expression_to_type)),
        box Variant::App(App { func, arg }) =>
            Tree::app(expression_to_type(func), expression_to_type(arg)),
        _ => return input,
    };
    // `input.variant` was consumed above (partial move); the span is still
    // available to transfer the original leading offset.
    out.span.left_offset += input.span.left_offset;
    out
}
/// Reinterpret an expression in a pattern context: strip wildcard de Bruijn
/// indices and rebuild template functions, groups, applications and type
/// annotations with their pattern parts converted recursively. Other
/// variants pass through unchanged.
fn expression_to_pattern(mut input: syntax::Tree<'_>) -> syntax::Tree<'_> {
    use syntax::tree::*;
    // A `_` wildcard in a pattern carries no de Bruijn index (presumably it
    // would otherwise be treated as a lambda argument — confirm).
    if let Variant::Wildcard(wildcard) = &mut *input.variant {
        wildcard.de_bruijn_index = None;
        return input;
    }
    let mut out = match input.variant {
        box Variant::TemplateFunction(TemplateFunction { ast,.. }) => expression_to_pattern(ast),
        box Variant::Group(Group { open, body: Some(body), close }) =>
            Tree::group(open, Some(expression_to_pattern(body)), close),
        box Variant::App(App { func, arg }) =>
            Tree::app(expression_to_pattern(func), expression_to_pattern(arg)),
        // Only the annotated expression is a pattern; the type side is kept
        // verbatim.
        box Variant::TypeAnnotated(TypeAnnotated { expression, operator, type_ }) =>
            Tree::type_annotated(expression_to_pattern(expression), operator, type_),
        _ => return input,
    };
    // `input.variant` was consumed above (partial move); the span is still
    // available to transfer the original leading offset.
    out.span.left_offset += input.span.left_offset;
    out
}
/// Split an application chain into its leftmost function and the list of
/// argument definitions applied to it, in source (left-to-right) order.
fn collect_arguments(tree: syntax::Tree) -> (syntax::Tree, Vec<syntax::tree::ArgumentDefinition>) {
    let mut collected = Vec::new();
    // Unrolling proceeds from the outermost application inwards, yielding the
    // arguments right-to-left; reverse to restore source order.
    let function = unroll_arguments(tree, &mut collected);
    collected.reverse();
    (function, collected)
}
fn collect_arguments_inclusive(tree: syntax::Tree) -> Vec<syntax::tree::ArgumentDefinition> |
/// Repeatedly peel argument applications off `tree`, appending each parsed
/// `ArgumentDefinition` to `args` (outermost first), and return the remaining
/// function expression.
fn unroll_arguments<'s>(
    mut tree: syntax::Tree<'s>,
    args: &mut Vec<syntax::tree::ArgumentDefinition<'s>>,
) -> syntax::Tree<'s> {
    loop {
        match parse_argument_application(&mut tree) {
            Some(arg) => args.push(arg),
            None => break,
        }
    }
    tree
}
/// Try to parse the expression as an application of a function to an `ArgumentDefinition`. If it
/// matches, replace the expression with its LHS, and return the `ArgumentDefinition` node.
pub fn parse_argument_application<'s>(
    expression: &'_ mut syntax::Tree<'s>,
) -> Option<syntax::tree::ArgumentDefinition<'s>> {
    use syntax::tree::*;
    match &mut expression.variant {
        // Plain application `f x`: the argument is parsed as a full
        // argument-definition element.
        box Variant::App(App { func, arg }) => {
            let arg = parse_argument_definition(arg.clone());
            // Fold the outer tree's leading offset into the remaining function
            // so spans stay correct after the in-place replacement below.
            func.span.left_offset += mem::take(&mut expression.span.left_offset);
            *expression = func.clone();
            Some(arg)
        }
        // Named application `f (name = value)`: the name becomes the pattern
        // and the value becomes the argument's default.
        box Variant::NamedApp(NamedApp { func, open, name, equals, arg, close }) => {
            let open = mem::take(open);
            let close = mem::take(close);
            let equals = equals.clone();
            let pattern = Tree::ident(name.clone());
            let open2 = default();
            let suspension = default();
            let close2 = default();
            let type_ = default();
            let default = Some(ArgumentDefault { equals, expression: arg.clone() });
            func.span.left_offset += mem::take(&mut expression.span.left_offset);
            *expression = func.clone();
            Some(ArgumentDefinition {
                open,
                open2,
                pattern,
                suspension,
                default,
                close2,
                type_,
                close,
            })
        }
        // Default-argument application: the `default` token itself becomes the
        // argument pattern; every other field is empty.
        box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
            let pattern = Tree::ident(default_.clone());
            func.span.left_offset += mem::take(&mut expression.span.left_offset);
            *expression = func.clone();
            Some(ArgumentDefinition {
                open: default(),
                open2: default(),
                suspension: default(),
                pattern,
                type_: default(),
                close2: default(),
                default: default(),
                close: default(),
            })
        }
        // Not an application — nothing to peel off.
        _ => None,
    }
}
/// Interpret the expression as an element of an argument definition sequence.
///
/// Peels the following wrappers off `pattern`, in order, collecting each into
/// the corresponding `ArgumentDefinition` field: an outer group, an `=`
/// default, an inner group, a `: Type` annotation, a template-function
/// wrapper, and a suspension operator. Whatever remains is converted to a
/// pattern.
pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::ArgumentDefinition {
    use syntax::tree::*;
    let mut open1 = default();
    let mut close1 = default();
    // Outer parenthesized group: fold the group's leading offset into the open
    // token when present, otherwise into the body.
    if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
        *(if let Some(open) = open.as_mut() {
            &mut open.left_offset
        } else {
            &mut body.span.left_offset
        }) += pattern.span.left_offset;
        open1 = open;
        close1 = close;
        pattern = body;
    }
    let mut default_ = default();
    // `pat = expr` at the top level supplies the argument's default value.
    if let Variant::OprApp(OprApp { lhs: Some(lhs), opr: Ok(opr), rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_assignment() {
        let left_offset = pattern.span.left_offset;
        default_ = Some(ArgumentDefault { equals: opr.clone(), expression: rhs.clone() });
        pattern = lhs.clone();
        pattern.span.left_offset += left_offset;
    }
    let mut open2 = default();
    let mut close2 = default();
    // A second group may wrap the (possibly annotated) pattern itself.
    if let box Variant::Group(Group { mut open, body: Some(mut body), close }) = pattern.variant {
        *(if let Some(open) = open.as_mut() {
            &mut open.left_offset
        } else {
            &mut body.span.left_offset
        }) += pattern.span.left_offset;
        open2 = open;
        close2 = close;
        pattern = body;
    }
    let mut type__ = default();
    // `pat : Type` supplies the argument's type annotation.
    if let box Variant::TypeAnnotated(TypeAnnotated { mut expression, operator, type_ }) =
        pattern.variant
    {
        expression.span.left_offset += pattern.span.left_offset;
        type__ = Some(ArgumentType { operator, type_ });
        pattern = expression;
    }
    let mut suspension = default();
    // A template-function wrapper is transparent for argument parsing.
    if let box Variant::TemplateFunction(TemplateFunction { mut ast,.. }) = pattern.variant {
        ast.span.left_offset += pattern.span.left_offset;
        pattern = ast;
    }
    // A unary suspension operator marks the argument as lazily evaluated.
    if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
        let mut opr = opr.clone();
        opr.left_offset += pattern.span.left_offset;
        suspension = Some(opr);
        pattern = rhs.clone();
    }
    let pattern = expression_to_pattern(pattern);
    let open = open1;
    let close = close1;
    let type_ = type__;
    ArgumentDefinition { open, open2, pattern, suspension, default: default_, close2, type_, close }
}
/// Return whether the expression is a body block.
fn is_body_block(expression: &syntax::tree::Tree<'_>) -> bool {
    use syntax::tree::Variant;
    matches!(&*expression.variant, Variant::BodyBlock { .. })
}
// ==================
// === Benchmarks ===
// ==================
#[cfg(test)]
mod benches {
use super::*;
extern crate test;
use test::Bencher;
    #[bench]
    fn bench_parsing_type_defs(bencher: &mut Bencher) {
        // Build the input once (1000 identical one-line type definitions);
        // only `parser.run` is timed.
        let reps = 1_000;
        let str = "type Option a b c\n".repeat(reps);
        let parser = Parser::new();
        bencher.iter(move || {
            parser.run(&str);
        });
    }
    #[bench]
    fn bench_blocks(bencher: &mut Bencher) {
        use rand::prelude::*;
        use rand_chacha::ChaCha8Rng;
        // Deterministic pseudo-random input (fixed seed): 10k lines with
        // randomly varying indentation, exercising block-structure parsing.
        let lines = 10_000;
        let mut str = String::new();
        let mut rng = ChaCha8Rng::seed_from_u64(0);
        let mut indent = 0u32;
        for _ in 0..lines {
            // Indent:
            // 1/8 chance of increasing.
            // 1/8 chance of decreasing.
            // 3/4 chance of leaving unchanged.
            match rng.gen_range(0..8) {
                0u32 => indent = indent.saturating_sub(1),
                1 => indent += 1,
                _ => (),
            }
            for _ in 0..indent {
                str.push(' ');
            }
            // 1/4 chance of operator-block line syntax.
            if rng.gen_range(0..4) == 0u32 {
                str.push_str("* ");
            }
            str.push('x');
            // Equal chance of the next line being interpreted as a body block or argument block
            // line, if it is indented and doesn't match the operator-block syntax.
            // The `=` operator is chosen to exercise the expression-to-statement conversion path.
            if rng.gen() {
                str.push_str(" =");
            }
            str.push('\n');
        }
        let parser = Parser::new();
        // Report throughput (bytes/sec) in addition to time per iteration.
        bencher.bytes = str.len() as u64;
        bencher.iter(move || {
            parser.run(&str);
        });
    }
#[bench]
fn bench_expressions(bencher: &mut Bencher) {
use rand::prelude::*;
use rand_chacha::ChaCha8Rng;
let lines = 100;
let avg_group_len = 20;
let avg_groups_per_line = 20;
let mut str = String::new();
let mut rng = ChaCha8Rng::seed_from_u64(0);
let normal = rand_distr::StandardNormal;
for _ in 0..lines {
let operators = ['=', '+', '-', '*', ':'];
let groups: f64 = normal.sample(&mut rng);
let groups = (groups * avg_groups_per_line as f64) as usize;
for _ in 0..groups {
let len: f64 = normal.sample(&mut rng);
let len = (len * avg_group_len as f64) as usize;
str.push('x');
for _ in 0..len {
let i = rng.gen_range(0..operators.len());
| {
let mut args = vec![];
let first = unroll_arguments(tree, &mut args);
args.push(parse_argument_definition(first));
args.reverse();
args
} | identifier_body |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
/// A packet I/O driver backend used to move packets in and out of the graph.
pub trait Driver: Sync {
    /// File descriptor backing the driver, if any — presumably used for
    /// poll/epoll readiness; `None` when the driver has no fd. TODO confirm.
    fn fd(&self) -> Option<i32>;
    /// Transmit one packet; the pool is available for particle alloc/free
    /// during the send. Returns a `usize` (by sendmsg convention presumably
    /// bytes sent — confirm against implementations).
    fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
    /// Receive one packet allocated from `pool`, reserving `headroom` bytes at
    /// the front; `None` when nothing is available.
    fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
    // Global graph-node index of the node currently being dispatched.
    node: usize,
    /// Packet/particle pool, available to the client for alloc/free.
    pub pool: &'d mut dyn PacketPool,
    // Per-node input queues, indexed by global graph-node index.
    vectors: &'d mut Vec<VecDeque<BoxPkt>>,
    // Per-node enqueue/drop counters, parallel to `vectors`.
    counters: &'d mut Vec<GnodeCntrs>,
    // The current node's edge table: maps its edge index to a global node index.
    nodes: &'d Vec<usize>,
    // Set when this node has more work pending after dispatch returns.
    work: bool,
    // Nanoseconds from now when the pending work should run; 0 = immediately.
    wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> |
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
    /// A unique name for the node
    pub name: String,
    /// Names of all the nodes this node will have edges to (ie will send packets to)
    pub next_names: Vec<String>,
    /// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
    pub cntrs: GnodeCntrs,
    /// Per-node timing instrumentation, sampled around each dispatch call.
    pub perf: Perf,
}
impl GnodeInit {
    /// Per-thread copy of the init parameters. Counters and perf handles are
    /// re-created rather than shared, since counters are per thread.
    pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
        GnodeInit {
            name: self.name.clone(),
            next_names: self.next_names.clone(),
            cntrs: GnodeCntrs::new(&self.name, counters),
            perf: Perf::new(&self.name, counters),
        }
    }
}
// Generic per-node queue counters.
pub struct GnodeCntrs {
    // Packets successfully enqueued to this node.
    enqed: Counter,
    // Packets dropped because this node's queue was full.
    drops: Counter,
}
impl GnodeCntrs {
    // Create the enqueue/drop counters, namespaced under the node's name.
    pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
        let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
        let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
        GnodeCntrs { enqed, drops }
    }
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
    // The feature/client object
    client: Box<dyn Gclient<T>>,
    // Name of the feature/client
    name: String,
    // Names of all the nodes this node will have edges to (ie will send packets to)
    next_names: Vec<String>,
    // Node ids corresponding to the names in next_names
    next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
    // Wrap a client with its graph metadata. `next_nodes` starts empty and is
    // filled in later by Graph::finalize() once names can be resolved.
    fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
        Gnode {
            client,
            name,
            next_names,
            next_nodes: Vec::new(),
        }
    }
    // Per-thread copy: the client decides what to clone vs share; a fresh
    // `counters` handle is passed because counters are per thread.
    fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
        Gnode {
            client: self.client.clone(counters, log),
            name: self.name.clone(),
            next_names: self.next_names.clone(),
            next_nodes: self.next_nodes.clone(),
        }
    }
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct Graph<T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
    /// A new graph is created with just one node in it, a Drop Node that just drops any packet
    /// it receives. The Drop node therefore sits at index 0, which doubles as
    /// the "unknown name" result of index().
    pub fn new(
        thread: usize,
        pool: Box<dyn PacketPool>,
        queue: Arc<ArrayQueue<BoxPkt>>,
        counters: &mut Counters,
    ) -> Self {
        let mut g = Graph {
            thread,
            nodes: Vec::with_capacity(GRAPH_INIT_SZ),
            perf: Vec::with_capacity(GRAPH_INIT_SZ),
            vectors: Vec::with_capacity(GRAPH_INIT_SZ),
            counters: Vec::with_capacity(GRAPH_INIT_SZ),
            indices: HashMap::with_capacity(GRAPH_INIT_SZ),
            pool,
            queue,
        };
        // Register the built-in Drop node; it has no outgoing edges.
        let init = GnodeInit {
            name: names::DROP.to_string(),
            next_names: vec![],
            cntrs: GnodeCntrs::new(names::DROP, counters),
            perf: Perf::new(names::DROP, counters),
        };
        let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
        g.add(Box::new(DropNode { count }), init);
        g
    }
    /// Clone the entire graph. That relies on each graph node feature/client providing
    /// an ability to clone() itself. Queues, counters and perf data are created
    /// fresh for the new thread; the name->index map is shared structure and
    /// simply copied.
    pub fn clone(
        &self,
        thread: usize,
        pool: Box<dyn PacketPool>,
        queue: Arc<ArrayQueue<BoxPkt>>,
        counters: &mut Counters,
        log: Arc<Logger>,
    ) -> Self {
        let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
        // Clone node-by-node so the parallel vectors stay index-aligned.
        for n in self.nodes.iter() {
            nodes.push(n.clone(counters, log.clone()));
            perf.push(Perf::new(&n.name, counters));
            vectors.push(VecDeque::with_capacity(VEC_SIZE));
            cntrs.push(GnodeCntrs::new(&n.name, counters));
        }
        Graph {
            thread,
            nodes,
            perf,
            vectors,
            counters: cntrs,
            indices: self.indices.clone(),
            pool,
            queue,
        }
    }
    /// Add a new feature/client node to the graph. Adding a name that is
    /// already registered is a no-op.
    pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
        let index = self.index(&init.name);
        // NOTE(review): index() returns 0 both for "not found" and for the
        // Drop node itself (index 0), so a duplicate registration under the
        // Drop node's name would not be caught here — confirm names::DROP is
        // never re-added after new().
        if index!= 0 {
            return; // Gclient already registered
        }
        // Keep nodes/perf/vectors/counters index-aligned: push to all four.
        self.nodes
            .push(Gnode::new(client, init.name.clone(), init.next_names));
        self.perf.push(init.perf);
        self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
        self.counters.push(init.cntrs);
        let index = self.nodes.len() - 1; // 0 based index
        self.indices.insert(init.name, index);
    }
fn index(&self, name: &str) -> usize {
if let Some(&index) = self.indices.get(name) {
index
} else {
0
}
}
    /// Any time a new node is added to the graph, there might be other nodes that have
    /// specified this new node as their next node - so we have to resolve those names
    /// to a proper node index. The finalize() will walk through all nodes and resolve
    /// next_name to node index. This is typically called after a new node is added
    pub fn finalize(&mut self) {
        for n in 0..self.nodes.len() {
            let node = &self.nodes[n];
            for l in 0..node.next_names.len() {
                // Re-borrow immutably before each self.index() call (which
                // needs &self), then mutably to write the result; unresolved
                // names map to 0, the Drop node.
                let node = &self.nodes[n];
                let index = self.index(&node.next_names[l]);
                let node = &mut self.nodes[n];
                // Grow next_nodes on demand so it stays parallel to next_names.
                if node.next_nodes.len() <= l {
                    node.next_nodes.resize(l + 1, 0);
                }
                node.next_nodes[l] = index;
            }
        }
    }
    // Run through all the nodes one single time, do whatever work is possible in that
    // iteration, and return (more_work_pending, earliest wakeup in nanoseconds
    // from now across all nodes that reported pending work).
    pub fn run(&mut self) -> (bool, usize) {
        // First return all the free packets back to the pool
        while let Ok(p) = self.queue.pop() {
            self.pool.free(p);
        }
        let mut nsecs = std::usize::MAX;
        let mut work = false;
        for n in 0..self.nodes.len() {
            let node = &mut self.nodes[n];
            let client = &mut node.client;
            // Build the dispatch context: shared queues/counters plus this
            // node's edge table for edge-index -> node-index translation.
            let mut d = Dispatch {
                node: n,
                pool: &mut *self.pool,
                vectors: &mut self.vectors,
                counters: &mut self.counters,
                nodes: &node.next_nodes,
                work: false,
                wakeup: std::usize::MAX,
            };
            // Time the client's dispatch for the per-node perf counter.
            self.perf[n].start();
            client.dispatch(self.thread, &mut d);
            self.perf[n].stop();
            // Does client have more work pending, and when does it need to do that work?
            if d.work {
                work = true;
                if d.wakeup < nsecs {
                    nsecs = d.wakeup;
                }
            }
        }
        (work, nsecs)
    }
    /// Deliver a control-plane message to the named node on this thread.
    /// Returns false when the name is unknown (note: index 0 — the built-in
    /// Drop node — is also treated as "not found" here).
    pub fn control_msg(&mut self, name: &str, message: T) -> bool {
        let index = self.index(name);
        if index == 0 {
            false
        } else {
            self.nodes[index].client.control_msg(self.thread, message);
            true
        }
    }
}
// Built-in sink node (always graph index 0): every packet delivered to it is
// counted and then dropped.
struct DropNode {
    // Number of packets this node has dropped.
    count: Counter,
}
impl<T> Gclient<T> for DropNode {
    /// Per-thread copy with a fresh counter, since counters cannot be shared
    /// across threads.
    fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
        Box::new(DropNode {
            count: Counter::new(counters, names::DROP, CounterType::Pkts, "count"),
        })
    }
    /// Drain every packet queued to this node, counting each one; the packet
    /// itself is freed implicitly when it goes out of scope.
    fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
        while vectors.pop().is_some() {
            self.count.incr();
        }
    }
}
#[cfg(test)]
mod test;
| {
self.vectors[self.node].pop_front()
} | identifier_body |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
pub trait Driver: Sync {
fn fd(&self) -> Option<i32>;
fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
node: usize,
pub pool: &'d mut dyn PacketPool,
vectors: &'d mut Vec<VecDeque<BoxPkt>>,
counters: &'d mut Vec<GnodeCntrs>,
nodes: &'d Vec<usize>,
work: bool,
wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> {
self.vectors[self.node].pop_front()
}
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work | else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
/// A unique name for the node
pub name: String,
/// Names of all the nodes this node will have edges to (ie will send packets to)
pub next_names: Vec<String>,
/// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
pub cntrs: GnodeCntrs,
pub perf: Perf,
}
impl GnodeInit {
pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
GnodeInit {
name: self.name.clone(),
next_names: self.next_names.clone(),
cntrs: GnodeCntrs::new(&self.name, counters),
perf: Perf::new(&self.name, counters),
}
}
}
pub struct GnodeCntrs {
enqed: Counter,
drops: Counter,
}
impl GnodeCntrs {
pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
GnodeCntrs { enqed, drops }
}
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
// The feature/client object
client: Box<dyn Gclient<T>>,
// Name of the feature/client
name: String,
// Names of all the nodes this node will have edges to (ie will send packets to)
next_names: Vec<String>,
// Node ids corresponding to the names in next_names
next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
Gnode {
client,
name,
next_names,
next_nodes: Vec::new(),
}
}
fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
Gnode {
client: self.client.clone(counters, log),
name: self.name.clone(),
next_names: self.next_names.clone(),
next_nodes: self.next_nodes.clone(),
}
}
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct Graph<T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
/// A new graph is created with just one node in it, a Drop Node that just drops any packet
/// it receives.
pub fn new(
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
) -> Self {
let mut g = Graph {
thread,
nodes: Vec::with_capacity(GRAPH_INIT_SZ),
perf: Vec::with_capacity(GRAPH_INIT_SZ),
vectors: Vec::with_capacity(GRAPH_INIT_SZ),
counters: Vec::with_capacity(GRAPH_INIT_SZ),
indices: HashMap::with_capacity(GRAPH_INIT_SZ),
pool,
queue,
};
let init = GnodeInit {
name: names::DROP.to_string(),
next_names: vec![],
cntrs: GnodeCntrs::new(names::DROP, counters),
perf: Perf::new(names::DROP, counters),
};
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
g.add(Box::new(DropNode { count }), init);
g
}
/// Clone the entire graph. That relies on each graph node feature/client providing
/// an ability to clone() itself
pub fn clone(
&self,
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
log: Arc<Logger>,
) -> Self {
let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
for n in self.nodes.iter() {
nodes.push(n.clone(counters, log.clone()));
perf.push(Perf::new(&n.name, counters));
vectors.push(VecDeque::with_capacity(VEC_SIZE));
cntrs.push(GnodeCntrs::new(&n.name, counters));
}
Graph {
thread,
nodes,
perf,
vectors,
counters: cntrs,
indices: self.indices.clone(),
pool,
queue,
}
}
/// Add a new feature/client node to the graph.
pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
let index = self.index(&init.name);
if index!= 0 {
return; // Gclient already registered
}
self.nodes
.push(Gnode::new(client, init.name.clone(), init.next_names));
self.perf.push(init.perf);
self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
self.counters.push(init.cntrs);
let index = self.nodes.len() - 1; // 0 based index
self.indices.insert(init.name, index);
}
fn index(&self, name: &str) -> usize {
if let Some(&index) = self.indices.get(name) {
index
} else {
0
}
}
/// Any time a new node is added to the graph, there might be other nodes that have
/// specified this new node as their next node - so we have to resolve those names
/// to a proper node index. The finalize() will walk through all nodes and resolve
/// next_name to node index. This is typically called after a new node is added
pub fn finalize(&mut self) {
for n in 0..self.nodes.len() {
let node = &self.nodes[n];
for l in 0..node.next_names.len() {
let node = &self.nodes[n];
let index = self.index(&node.next_names[l]);
let node = &mut self.nodes[n];
if node.next_nodes.len() <= l {
node.next_nodes.resize(l + 1, 0);
}
node.next_nodes[l] = index;
}
}
}
// Run through all the nodes one single time, do whatever work is possible in that
// iteration, and return values which say if more work is pending and at what time
// the work has to be done
pub fn run(&mut self) -> (bool, usize) {
// First return all the free packets back to the pool
while let Ok(p) = self.queue.pop() {
self.pool.free(p);
}
let mut nsecs = std::usize::MAX;
let mut work = false;
for n in 0..self.nodes.len() {
let node = &mut self.nodes[n];
let client = &mut node.client;
let mut d = Dispatch {
node: n,
pool: &mut *self.pool,
vectors: &mut self.vectors,
counters: &mut self.counters,
nodes: &node.next_nodes,
work: false,
wakeup: std::usize::MAX,
};
self.perf[n].start();
client.dispatch(self.thread, &mut d);
self.perf[n].stop();
// Does client have more work pending, and when does it need to do that work?
if d.work {
work = true;
if d.wakeup < nsecs {
nsecs = d.wakeup;
}
}
}
(work, nsecs)
}
pub fn control_msg(&mut self, name: &str, message: T) -> bool {
let index = self.index(name);
if index == 0 {
false
} else {
self.nodes[index].client.control_msg(self.thread, message);
true
}
}
}
struct DropNode {
count: Counter,
}
impl<T> Gclient<T> for DropNode {
fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
Box::new(DropNode { count })
}
fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
while let Some(_) = vectors.pop() {
self.count.incr();
}
}
}
#[cfg(test)]
mod test;
| {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} | conditional_block |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
/// Abstraction over a packet I/O backend that feeds packets into / drains
/// packets out of the graph.
pub trait Driver: Sync {
    /// File descriptor backing this driver, if any, so the caller can poll on it.
    fn fd(&self) -> Option<i32>;
    /// Transmit one packet; returns the number of bytes sent
    /// (presumably 0 on failure -- TODO confirm against implementors).
    fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
    /// Receive one packet into a buffer allocated from `pool`, reserving
    /// `headroom` bytes in front; `None` when nothing is available.
    fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
/// `T` is the control-plane message type delivered via control_msg().
pub trait Gclient<T>: Send {
    /// Make a clone() of the node, usually to be used in another thread. It is upto the
    /// client to decide what should be cloned/copied and what should be shared. For example,
    /// counters are always per thread and cant be shared, a new set of counters need to be
    /// made per thread
    fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
    /// This API is called to hand over packets to the client for processing. Dispatch has
    /// pop() API to get packets destined for the node, and push() API to push packets to
    /// other graph nodes
    fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
    /// This API is called when a node gets a message from control plane, like for example
    /// to modify the nodes forwarding tables etc..
    /// The default is a no-op so clients without control-plane state need not implement it.
    fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
    // Index (into `vectors`/`counters`) of the node currently being dispatched.
    node: usize,
    /// Packet/particle pool the client can allocate from while processing.
    pub pool: &'d mut dyn PacketPool,
    // Per-node input queues; vectors[i] holds packets destined for node i.
    vectors: &'d mut Vec<VecDeque<BoxPkt>>,
    // Per-node enqueue/drop counters, indexed like `vectors`.
    counters: &'d mut Vec<GnodeCntrs>,
    // Maps this node's edge number (as passed to push()) to a global node index.
    nodes: &'d Vec<usize>,
    // Set when this node (or an already-dispatched node) has pending work.
    work: bool,
    // Nanoseconds from now until this node next has work; 0 means right now.
    wakeup: usize,
}
impl<'d> Dispatch<'d> {
    /// Get one of the packets queued up for the node currently being dispatched.
    pub fn pop(&mut self) -> Option<BoxPkt> {
        self.vectors[self.node].pop_front()
    }

    /// Queue one packet to another node. `node` is the edge number (an index
    /// into this node's next-node list), not a global node index. Returns
    /// false, and counts a drop, when the destination queue is full.
    pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
        let node = self.nodes[node];
        // Enforce the per-node queue limit: enqueue only while spare capacity
        // remains. (The previous check, `capacity() >= 1`, is always true for a
        // VecDeque created with with_capacity(VEC_SIZE) -- capacity never
        // shrinks -- so it let queues grow without bound and no packet was
        // ever dropped, defeating the documented VEC_SIZE cap.)
        if self.vectors[node].len() < self.vectors[node].capacity() {
            self.vectors[node].push_back(pkt);
            if node <= self.node {
                // The destination was already dispatched in this iteration (or
                // is ourselves), so another run is needed right away.
                self.work = true;
                self.wakeup = 0;
            }
            self.counters[node].enqed.incr();
            true
        } else {
            self.counters[node].drops.incr();
            false
        }
    }

    /// Specify the time when this node has work again/needs to be scheduled again
    /// wakeup of zero means it has work right now, non zero wakeup indicates time
    /// in nanoseconds from now when the node has work. Repeated calls keep the
    /// earliest requested wakeup.
    pub fn wakeup(&mut self, wakeup: usize) {
        if self.work {
            if wakeup < self.wakeup {
                self.wakeup = wakeup;
            }
        } else {
            self.work = true;
            self.wakeup = wakeup;
        }
    }
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
    /// A unique name for the node
    pub name: String,
    /// Names of all the nodes this node will have edges to (ie will send packets to)
    pub next_names: Vec<String>,
    /// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
    pub cntrs: GnodeCntrs,
    /// Performance (timing) tracker for this node's dispatch calls.
    pub perf: Perf,
}
impl GnodeInit {
    /// Duplicate this init descriptor for another thread: the name and edge
    /// list are copied, while the counters and perf tracker are registered
    /// fresh, since those are per-thread and cannot be shared.
    pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
        let name = self.name.clone();
        let cntrs = GnodeCntrs::new(&name, counters);
        let perf = Perf::new(&name, counters);
        GnodeInit {
            next_names: self.next_names.clone(),
            cntrs,
            perf,
            name,
        }
    }
}
pub struct GnodeCntrs {
    // Packets successfully enqueued to this node.
    enqed: Counter,
    // Packets dropped because this node's queue was full.
    drops: Counter,
}
impl GnodeCntrs {
pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
GnodeCntrs { enqed, drops }
}
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
// `T` is the control-plane message type the client accepts.
struct Gnode<T> {
    // The feature/client object
    client: Box<dyn Gclient<T>>,
    // Name of the feature/client
    name: String,
    // Names of all the nodes this node will have edges to (ie will send packets to)
    next_names: Vec<String>,
    // Node ids corresponding to the names in next_names
    // (filled in by Graph::finalize(); empty until then)
    next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
    /// Wrap a client into a graph node; edge indices stay unresolved (empty)
    /// until Graph::finalize() runs.
    fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
        Self {
            next_nodes: vec![],
            client,
            name,
            next_names,
        }
    }

    /// Per-thread copy of the node: metadata is cloned directly, while the
    /// client decides for itself what to share vs. duplicate.
    fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
        let client = self.client.clone(counters, log);
        Self {
            client,
            name: self.name.clone(),
            next_names: self.next_names.clone(),
            next_nodes: self.next_nodes.clone(),
        }
    }
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct | <T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
    /// A new graph is created with just one node in it, a Drop Node that just drops any packet
    /// it receives.
    pub fn new(
        thread: usize,
        pool: Box<dyn PacketPool>,
        queue: Arc<ArrayQueue<BoxPkt>>,
        counters: &mut Counters,
    ) -> Self {
        let mut g = Graph {
            thread,
            nodes: Vec::with_capacity(GRAPH_INIT_SZ),
            perf: Vec::with_capacity(GRAPH_INIT_SZ),
            vectors: Vec::with_capacity(GRAPH_INIT_SZ),
            counters: Vec::with_capacity(GRAPH_INIT_SZ),
            indices: HashMap::with_capacity(GRAPH_INIT_SZ),
            pool,
            queue,
        };
        // The drop node is registered first so it gets index 0; index() also
        // returns 0 for unknown names, which makes the drop node the default
        // target for unresolved edges.
        let init = GnodeInit {
            name: names::DROP.to_string(),
            next_names: vec![],
            cntrs: GnodeCntrs::new(names::DROP, counters),
            perf: Perf::new(names::DROP, counters),
        };
        let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
        g.add(Box::new(DropNode { count }), init);
        g
    }
    /// Clone the entire graph. That relies on each graph node feature/client providing
    /// an ability to clone() itself
    pub fn clone(
        &self,
        thread: usize,
        pool: Box<dyn PacketPool>,
        queue: Arc<ArrayQueue<BoxPkt>>,
        counters: &mut Counters,
        log: Arc<Logger>,
    ) -> Self {
        let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
        let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
        for n in self.nodes.iter() {
            // Node metadata/edges are cloned, but queues, counters and perf
            // trackers are created fresh: those are per-thread state.
            nodes.push(n.clone(counters, log.clone()));
            perf.push(Perf::new(&n.name, counters));
            vectors.push(VecDeque::with_capacity(VEC_SIZE));
            cntrs.push(GnodeCntrs::new(&n.name, counters));
        }
        Graph {
            thread,
            nodes,
            perf,
            vectors,
            counters: cntrs,
            // The name-to-index mapping is identical across threads.
            indices: self.indices.clone(),
            pool,
            queue,
        }
    }
    /// Add a new feature/client node to the graph. Callers are expected to run
    /// finalize() afterwards so edges that name this node get resolved.
    pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
        let index = self.index(&init.name);
        if index!= 0 {
            return; // Gclient already registered
        }
        self.nodes
            .push(Gnode::new(client, init.name.clone(), init.next_names));
        self.perf.push(init.perf);
        self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
        self.counters.push(init.cntrs);
        let index = self.nodes.len() - 1; // 0 based index
        self.indices.insert(init.name, index);
    }
    // Resolve a node name to its index; unknown names map to 0, the drop node.
    fn index(&self, name: &str) -> usize {
        if let Some(&index) = self.indices.get(name) {
            index
        } else {
            0
        }
    }
    /// Any time a new node is added to the graph, there might be other nodes that have
    /// specified this new node as their next node - so we have to resolve those names
    /// to a proper node index. The finalize() will walk through all nodes and resolve
    /// next_name to node index. This is typically called after a new node is added
    pub fn finalize(&mut self) {
        for n in 0..self.nodes.len() {
            let node = &self.nodes[n];
            for l in 0..node.next_names.len() {
                // The repeated re-borrows of self.nodes[n] below keep the
                // borrow checker happy: index() needs &self, which cannot be
                // taken while a borrow of the node is still held.
                let node = &self.nodes[n];
                let index = self.index(&node.next_names[l]);
                let node = &mut self.nodes[n];
                if node.next_nodes.len() <= l {
                    node.next_nodes.resize(l + 1, 0);
                }
                node.next_nodes[l] = index;
            }
        }
    }
    // Run through all the nodes one single time, do whatever work is possible in that
    // iteration, and return values which say if more work is pending and at what time
    // the work has to be done
    pub fn run(&mut self) -> (bool, usize) {
        // First return all the free packets back to the pool
        while let Ok(p) = self.queue.pop() {
            self.pool.free(p);
        }
        // nsecs tracks the earliest wakeup requested by any node
        // (usize::MAX == no node asked to be woken).
        let mut nsecs = std::usize::MAX;
        let mut work = false;
        for n in 0..self.nodes.len() {
            let node = &mut self.nodes[n];
            let client = &mut node.client;
            let mut d = Dispatch {
                node: n,
                pool: &mut *self.pool,
                vectors: &mut self.vectors,
                counters: &mut self.counters,
                nodes: &node.next_nodes,
                work: false,
                wakeup: std::usize::MAX,
            };
            // Time each client's dispatch for the per-node perf stats.
            self.perf[n].start();
            client.dispatch(self.thread, &mut d);
            self.perf[n].stop();
            // Does client have more work pending, and when does it need to do that work?
            if d.work {
                work = true;
                if d.wakeup < nsecs {
                    nsecs = d.wakeup;
                }
            }
        }
        (work, nsecs)
    }
    /// Deliver a control-plane message to the node called `name`; returns false
    /// when no such node exists (index 0, the drop node, doubles as "not found").
    pub fn control_msg(&mut self, name: &str, message: T) -> bool {
        let index = self.index(name);
        if index == 0 {
            false
        } else {
            self.nodes[index].client.control_msg(self.thread, message);
            true
        }
    }
}
// Graph node 0: a sink that counts and discards every packet sent to it.
struct DropNode {
    // Number of packets dropped by this node (per thread).
    count: Counter,
}
impl<T> Gclient<T> for DropNode {
    /// Per-thread copy: counters are per thread, so register a fresh drop counter.
    fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
        let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
        Box::new(DropNode { count })
    }

    /// Drain every packet queued to this node, counting each; the popped
    /// packets are simply dropped.
    fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
        // `is_some()` instead of `while let Some(_)` -- idiomatic form
        // (clippy::redundant_pattern_matching).
        while vectors.pop().is_some() {
            self.count.incr();
        }
    }
}
#[cfg(test)]
mod test;
| Graph | identifier_name |
lib.rs | use counters::flavors::{Counter, CounterType};
use counters::Counters;
use crossbeam_queue::ArrayQueue;
use log::Logger;
use packet::BoxPkt;
use packet::PacketPool;
use perf::Perf;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::sync::Arc;
// We preallocate space for these many graph nodes, of course it can grow beyond that,
// but the goal is as much as possible to pre-allocate space
const GRAPH_INIT_SZ: usize = 1024;
/// The size of the packet queue to each graph node. Beyond this, packets to that node
/// will get dropped
pub const VEC_SIZE: usize = 256;
pub trait Driver: Sync {
fn fd(&self) -> Option<i32>;
fn sendmsg(&mut self, pool: &mut dyn PacketPool, pkt: BoxPkt) -> usize;
fn recvmsg(&mut self, pool: &mut dyn PacketPool, headroom: usize) -> Option<BoxPkt>;
}
/// Every graph node feature/client needs to implement these methods/APIs
pub trait Gclient<T>: Send {
/// Make a clone() of the node, usually to be used in another thread. It is upto the
/// client to decide what should be cloned/copied and what should be shared. For example,
/// counters are always per thread and cant be shared, a new set of counters need to be
/// made per thread
fn clone(&self, _counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>>;
/// This API is called to hand over packets to the client for processing. Dispatch has
/// pop() API to get packets destined for the node, and push() API to push packets to
/// other graph nodes
fn dispatch(&mut self, _thread: usize, _vectors: &mut Dispatch);
/// This API is called when a node gets a message from control plane, like for example
/// to modify the nodes forwarding tables etc..
fn control_msg(&mut self, _thread: usize, _message: T) {}
}
/// This structure provides methods to get packets queued up for a node, and for
/// the node to queue up packets to other nodes
pub struct Dispatch<'d> {
node: usize,
pub pool: &'d mut dyn PacketPool,
vectors: &'d mut Vec<VecDeque<BoxPkt>>,
counters: &'d mut Vec<GnodeCntrs>,
nodes: &'d Vec<usize>,
work: bool,
wakeup: usize,
}
impl<'d> Dispatch<'d> {
/// Get one of the packets queued up for a node
pub fn pop(&mut self) -> Option<BoxPkt> {
self.vectors[self.node].pop_front()
}
/// Queue one packet to another node
pub fn push(&mut self, node: usize, pkt: BoxPkt) -> bool {
let node = self.nodes[node];
if self.vectors[node].capacity() >= 1 {
self.vectors[node].push_back(pkt);
if node <= self.node {
self.work = true;
self.wakeup = 0;
}
self.counters[node].enqed.incr();
true
} else {
self.counters[node].drops.incr();
false
}
}
/// Specify the time when this node has work again/needs to be scheduled again
/// wakeup of zero means it has work right now, non zero wakeup indicates time
/// in nanoseconds from now when the node has work
pub fn wakeup(&mut self, wakeup: usize) {
if self.work {
if wakeup < self.wakeup {
self.wakeup = wakeup;
}
} else {
self.work = true;
self.wakeup = wakeup;
}
}
}
/// The parameters each feature/client node needs to specify if it wants to be added
/// to the graph
pub struct GnodeInit {
/// A unique name for the node
pub name: String,
/// Names of all the nodes this node will have edges to (ie will send packets to)
pub next_names: Vec<String>,
/// A set of generic counters that tracks the node's enqueue/dequeue/drops etc..
pub cntrs: GnodeCntrs,
pub perf: Perf,
}
impl GnodeInit {
pub fn clone(&self, counters: &mut Counters) -> GnodeInit {
GnodeInit {
name: self.name.clone(),
next_names: self.next_names.clone(),
cntrs: GnodeCntrs::new(&self.name, counters),
perf: Perf::new(&self.name, counters),
}
}
}
pub struct GnodeCntrs {
enqed: Counter,
drops: Counter,
}
impl GnodeCntrs {
pub fn new(name: &str, counters: &mut Counters) -> GnodeCntrs {
let enqed = Counter::new(counters, name, CounterType::Pkts, "GraphEnq");
let drops = Counter::new(counters, name, CounterType::Error, "GraphDrop");
GnodeCntrs { enqed, drops }
}
}
// The Gnode structure holds the exact node feature/client object and some metadata
// associated with the client
struct Gnode<T> {
// The feature/client object
client: Box<dyn Gclient<T>>,
// Name of the feature/client
name: String,
// Names of all the nodes this node will have edges to (ie will send packets to)
next_names: Vec<String>,
// Node ids corresponding to the names in next_names
next_nodes: Vec<usize>,
}
impl<T> Gnode<T> {
fn new(client: Box<dyn Gclient<T>>, name: String, next_names: Vec<String>) -> Self {
Gnode {
client,
name,
next_names, | next_nodes: Vec::new(),
}
}
fn clone(&self, counters: &mut Counters, log: Arc<Logger>) -> Self {
Gnode {
client: self.client.clone(counters, log),
name: self.name.clone(),
next_names: self.next_names.clone(),
next_nodes: self.next_nodes.clone(),
}
}
}
// The Graph object, basically a collection of graph nodes and edges from node to node
// Usually there is one Graph per thread, the graphs in each thread are copies of each other
pub struct Graph<T> {
// The thread this graph belongs to
thread: usize,
// The graph nodes
nodes: Vec<Gnode<T>>,
// Graph node performance info
perf: Vec<Perf>,
// A per node packet queue, to hold packets from other nodes to this node
vectors: Vec<VecDeque<BoxPkt>>,
// Generic enq/deq/drop counters per node
counters: Vec<GnodeCntrs>,
// Each graph node has an index which is an offset into the nodes Vec in this structure.
// This hashmap provides a mapping from a graph node name to its index
indices: HashMap<String, usize>,
// Packet/Particle pool
pool: Box<dyn PacketPool>,
// Freed packets are queued here
queue: Arc<ArrayQueue<BoxPkt>>,
}
impl<T> Graph<T> {
/// A new graph is created with just one node in it, a Drop Node that just drops any packet
/// it receives.
pub fn new(
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
) -> Self {
let mut g = Graph {
thread,
nodes: Vec::with_capacity(GRAPH_INIT_SZ),
perf: Vec::with_capacity(GRAPH_INIT_SZ),
vectors: Vec::with_capacity(GRAPH_INIT_SZ),
counters: Vec::with_capacity(GRAPH_INIT_SZ),
indices: HashMap::with_capacity(GRAPH_INIT_SZ),
pool,
queue,
};
let init = GnodeInit {
name: names::DROP.to_string(),
next_names: vec![],
cntrs: GnodeCntrs::new(names::DROP, counters),
perf: Perf::new(names::DROP, counters),
};
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
g.add(Box::new(DropNode { count }), init);
g
}
/// Clone the entire graph. That relies on each graph node feature/client providing
/// an ability to clone() itself
pub fn clone(
&self,
thread: usize,
pool: Box<dyn PacketPool>,
queue: Arc<ArrayQueue<BoxPkt>>,
counters: &mut Counters,
log: Arc<Logger>,
) -> Self {
let mut nodes = Vec::with_capacity(GRAPH_INIT_SZ);
let mut perf = Vec::with_capacity(GRAPH_INIT_SZ);
let mut vectors = Vec::with_capacity(GRAPH_INIT_SZ);
let mut cntrs = Vec::with_capacity(GRAPH_INIT_SZ);
for n in self.nodes.iter() {
nodes.push(n.clone(counters, log.clone()));
perf.push(Perf::new(&n.name, counters));
vectors.push(VecDeque::with_capacity(VEC_SIZE));
cntrs.push(GnodeCntrs::new(&n.name, counters));
}
Graph {
thread,
nodes,
perf,
vectors,
counters: cntrs,
indices: self.indices.clone(),
pool,
queue,
}
}
/// Add a new feature/client node to the graph.
pub fn add(&mut self, client: Box<dyn Gclient<T>>, init: GnodeInit) {
let index = self.index(&init.name);
if index!= 0 {
return; // Gclient already registered
}
self.nodes
.push(Gnode::new(client, init.name.clone(), init.next_names));
self.perf.push(init.perf);
self.vectors.push(VecDeque::with_capacity(VEC_SIZE));
self.counters.push(init.cntrs);
let index = self.nodes.len() - 1; // 0 based index
self.indices.insert(init.name, index);
}
fn index(&self, name: &str) -> usize {
if let Some(&index) = self.indices.get(name) {
index
} else {
0
}
}
/// Any time a new node is added to the graph, there might be other nodes that have
/// specified this new node as their next node - so we have to resolve those names
/// to a proper node index. The finalize() will walk through all nodes and resolve
/// next_name to node index. This is typically called after a new node is added
pub fn finalize(&mut self) {
for n in 0..self.nodes.len() {
let node = &self.nodes[n];
for l in 0..node.next_names.len() {
let node = &self.nodes[n];
let index = self.index(&node.next_names[l]);
let node = &mut self.nodes[n];
if node.next_nodes.len() <= l {
node.next_nodes.resize(l + 1, 0);
}
node.next_nodes[l] = index;
}
}
}
// Run through all the nodes one single time, do whatever work is possible in that
// iteration, and return values which say if more work is pending and at what time
// the work has to be done
pub fn run(&mut self) -> (bool, usize) {
// First return all the free packets back to the pool
while let Ok(p) = self.queue.pop() {
self.pool.free(p);
}
let mut nsecs = std::usize::MAX;
let mut work = false;
for n in 0..self.nodes.len() {
let node = &mut self.nodes[n];
let client = &mut node.client;
let mut d = Dispatch {
node: n,
pool: &mut *self.pool,
vectors: &mut self.vectors,
counters: &mut self.counters,
nodes: &node.next_nodes,
work: false,
wakeup: std::usize::MAX,
};
self.perf[n].start();
client.dispatch(self.thread, &mut d);
self.perf[n].stop();
// Does client have more work pending, and when does it need to do that work?
if d.work {
work = true;
if d.wakeup < nsecs {
nsecs = d.wakeup;
}
}
}
(work, nsecs)
}
pub fn control_msg(&mut self, name: &str, message: T) -> bool {
let index = self.index(name);
if index == 0 {
false
} else {
self.nodes[index].client.control_msg(self.thread, message);
true
}
}
}
struct DropNode {
count: Counter,
}
impl<T> Gclient<T> for DropNode {
fn clone(&self, counters: &mut Counters, _log: Arc<Logger>) -> Box<dyn Gclient<T>> {
let count = Counter::new(counters, names::DROP, CounterType::Pkts, "count");
Box::new(DropNode { count })
}
fn dispatch(&mut self, _thread: usize, vectors: &mut Dispatch) {
while let Some(_) = vectors.pop() {
self.count.incr();
}
}
}
#[cfg(test)]
mod test; | random_line_split |
|
claim_name.rs | use std::{ops::Deref, sync::Arc};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder};
use bathbot_macros::SlashCommand;
use bathbot_model::{
rkyv_util::time::{DateRkyv, DateTimeRkyv},
rosu_v2::user::{ArchivedUser, User, UserHighestRank as UserHighestRankRkyv, UserStatistics},
};
use bathbot_util::{constants::OSU_API_ISSUE, MessageBuilder};
use eyre::{Report, Result};
use futures::{future, stream::FuturesUnordered, TryStreamExt};
use once_cell::sync::OnceCell;
use rkyv::{
with::{DeserializeWith, Map},
Archived, Infallible,
};
use rosu_v2::prelude::{CountryCode, GameMode, OsuError, UserHighestRank, Username};
use time::{OffsetDateTime, Time};
use twilight_interactions::command::{CommandModel, CreateCommand};
use crate::{
core::Context,
embeds::ClaimNameEmbed,
embeds::EmbedData,
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, InteractionCommandExt},
};
#[derive(CommandModel, CreateCommand, SlashCommand)]
#[command(
name = "claimname",
desc = "Check how much longer to wait until a name is up for grabs",
help = "If a player has not signed in for at least 6 months and has no plays,\
their username may be claimed.\n\
If that player does have any plays across all game modes, \
a [non-linear function](https://www.desmos.com/calculator/b89siyv9j8) is used to calculate \
how much extra time is added to those 6 months.\n\
This is to prevent people from stealing the usernames of active or recently retired players."
)]
pub struct ClaimName {
#[command(desc = "Specify a username")]
name: String,
}
async fn slash_claimname(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let ClaimName { name } = ClaimName::from_interaction(command.input_data())?;
let content = if name.chars().count() > 15 {
Some("Names can have at most 15 characters so your name won't be accepted".to_owned())
} else if let Some(c) = name
.chars()
.find(|c|!matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '[' | ']' | '_' |''))
{
Some(format!(
"`{c}` is an invalid character for usernames so `{name}` won't be accepted"
))
} else if name.len() < 3 {
Some(format!(
"Names must be at least 3 characters long so `{name}` won't be accepted"
))
} else if name.contains('_') && name.contains(' ') {
Some(format!(
"Names may contains underscores or spaces but not both \
so `{name}` won't be accepted"
))
} else if name.starts_with(' ') || name.ends_with(' ') {
Some(format!(
"Names can't start or end with spaces so `{name}` won't be accepted"
))
} else {
None
};
if let Some(content) = content {
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
let user_id = match UserArgs::username(&ctx, &name).await {
UserArgs::Args(args) => args.user_id,
UserArgs::User { user,.. } => user.user_id,
UserArgs::Err(OsuError::NotFound) => {
let content = if ClaimNameValidator::is_valid(&name) {
format!("User `{name}` was not found, the name should be available to claim")
} else {
format!("`{name}` does not seem to be taken but it likely won't be accepted")
};
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
UserArgs::Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let args = [
GameMode::Osu,
GameMode::Taiko,
GameMode::Catch,
GameMode::Mania,
]
.map(|mode| UserArgs::user_id(user_id).mode(mode));
let user_fut = args
.into_iter()
.map(|args| ctx.redis().osu_user(args))
.collect::<FuturesUnordered<_>>()
.try_fold(None, |user: Option<ClaimNameUser>, next| match user {
Some(mut user) => {
let next_stats = next.stats();
match user.statistics {
Some(ref mut accum) => accum.playcount += next_stats.playcount(),
None => user.statistics = Some(next_stats.to_owned()),
}
let (next_highest_rank, next_last_visit) = match next {
RedisData::Original(next) => {
let rank = next.highest_rank;
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| count.start_date.with_time(Time::MIDNIGHT).assume_utc());
let last_visit = match (next.last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
RedisData::Archive(next) => {
let next: &Archived<User> = &next;
let rank = Map::<UserHighestRankRkyv>::deserialize_with(
&next.highest_rank,
&mut Infallible,
)
.unwrap();
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| {
DateRkyv::deserialize_with(&count.start_date, &mut Infallible)
.unwrap()
.with_time(Time::MIDNIGHT)
.assume_utc()
});
let last_visit = next.last_visit.as_ref().map(|time| {
DateTimeRkyv::deserialize_with(time, &mut Infallible).unwrap()
});
let last_visit = match (last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
};
match (user.highest_rank.as_mut(), next_highest_rank) {
(Some(curr), Some(next)) if curr.rank > next.rank => *curr = next,
(None, next @ Some(_)) => user.highest_rank = next,
_ => {}
}
match (user.last_visit.as_mut(), next_last_visit) {
(Some(curr), Some(next)) if *curr < next => *curr = next,
(None, next @ Some(_)) => user.last_visit = next,
_ => {}
}
future::ready(Ok(Some(user)))
}
None => future::ready(Ok(Some(ClaimNameUser::from(next)))),
});
let user = match user_fut.await {
Ok(user) => user.unwrap(),
Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let embed = ClaimNameEmbed::new(&user, &name).build();
let builder = MessageBuilder::new().embed(embed);
command.update(&ctx, builder).await?;
Ok(())
}
pub struct ClaimNameUser {
pub avatar_url: Box<str>,
pub country_code: CountryCode,
pub has_badges: bool,
pub has_ranked_mapsets: bool,
pub highest_rank: Option<UserHighestRank>,
pub last_visit: Option<OffsetDateTime>,
pub statistics: Option<UserStatistics>,
pub username: Username,
pub user_id: u32,
}
impl From<User> for ClaimNameUser {
#[inline]
fn from(user: User) -> Self {
Self {
avatar_url: user.avatar_url,
country_code: user.country_code,
has_badges:!user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: user.highest_rank,
last_visit: user.last_visit,
statistics: user.statistics.map(UserStatistics::from),
username: user.username,
user_id: user.user_id,
}
}
}
impl From<&ArchivedUser> for ClaimNameUser {
#[inline]
fn from(user: &ArchivedUser) -> Self {
Self {
avatar_url: user.avatar_url.as_ref().into(),
country_code: user.country_code.as_str().into(),
has_badges:!user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: Map::<UserHighestRankRkyv>::deserialize_with(
&user.highest_rank,
&mut Infallible,
)
.unwrap(),
last_visit: Map::<DateTimeRkyv>::deserialize_with(&user.last_visit, &mut Infallible)
.unwrap(),
statistics: user.statistics.as_ref().cloned(),
username: user.username.as_str().into(),
user_id: user.user_id,
}
}
}
impl From<RedisData<User>> for ClaimNameUser {
#[inline]
fn from(user: RedisData<User>) -> Self |
}
pub struct ClaimNameValidator;
impl ClaimNameValidator {
pub fn is_valid(prefix: &str) -> bool {
!VALIDATOR
.get_or_init(|| {
let needles = [
"qfqqz",
"dppljf{",
"difbu",
"ojhhfs",
"mpmj",
"gvdl",
"ejmep",
"gbhhpu",
"dvou",
"tijhfupsb",
"qpso",
"cbodip",
"qfojt",
"wbhjob",
"qvttz",
"ejdl",
"dpdl",
"brvjmb",
"ijumfs",
"ibdl",
"tibwju",
"gsjfoepl",
]
.into_iter()
.map(String::from)
.map(|mut needle| {
unsafe { needle.as_bytes_mut() }
.iter_mut()
.for_each(|byte| *byte -= 1);
needle
});
AhoCorasickBuilder::new()
.ascii_case_insensitive(true)
.dfa(true)
.build_with_size(needles)
.unwrap()
})
.is_match(prefix)
}
}
static VALIDATOR: OnceCell<AhoCorasick<u16>> = OnceCell::new();
| {
match user {
RedisData::Original(user) => Self::from(user),
RedisData::Archive(user) => Self::from(user.deref()),
}
} | identifier_body |
claim_name.rs | use std::{ops::Deref, sync::Arc};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder};
use bathbot_macros::SlashCommand;
use bathbot_model::{
rkyv_util::time::{DateRkyv, DateTimeRkyv},
rosu_v2::user::{ArchivedUser, User, UserHighestRank as UserHighestRankRkyv, UserStatistics},
};
use bathbot_util::{constants::OSU_API_ISSUE, MessageBuilder};
use eyre::{Report, Result};
use futures::{future, stream::FuturesUnordered, TryStreamExt};
use once_cell::sync::OnceCell;
use rkyv::{
with::{DeserializeWith, Map},
Archived, Infallible,
};
use rosu_v2::prelude::{CountryCode, GameMode, OsuError, UserHighestRank, Username};
use time::{OffsetDateTime, Time};
use twilight_interactions::command::{CommandModel, CreateCommand};
use crate::{
core::Context,
embeds::ClaimNameEmbed,
embeds::EmbedData,
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, InteractionCommandExt},
};
#[derive(CommandModel, CreateCommand, SlashCommand)]
#[command(
name = "claimname",
desc = "Check how much longer to wait until a name is up for grabs",
help = "If a player has not signed in for at least 6 months and has no plays,\
their username may be claimed.\n\
If that player does have any plays across all game modes, \
a [non-linear function](https://www.desmos.com/calculator/b89siyv9j8) is used to calculate \
how much extra time is added to those 6 months.\n\
This is to prevent people from stealing the usernames of active or recently retired players."
)]
pub struct ClaimName {
#[command(desc = "Specify a username")]
name: String,
}
async fn slash_claimname(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let ClaimName { name } = ClaimName::from_interaction(command.input_data())?;
let content = if name.chars().count() > 15 {
Some("Names can have at most 15 characters so your name won't be accepted".to_owned())
} else if let Some(c) = name
.chars()
.find(|c|!matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '[' | ']' | '_' |''))
{
Some(format!(
"`{c}` is an invalid character for usernames so `{name}` won't be accepted"
))
} else if name.len() < 3 {
Some(format!(
"Names must be at least 3 characters long so `{name}` won't be accepted"
))
} else if name.contains('_') && name.contains(' ') {
Some(format!(
"Names may contains underscores or spaces but not both \
so `{name}` won't be accepted"
))
} else if name.starts_with(' ') || name.ends_with(' ') {
Some(format!(
"Names can't start or end with spaces so `{name}` won't be accepted"
))
} else {
None
};
if let Some(content) = content {
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
let user_id = match UserArgs::username(&ctx, &name).await {
UserArgs::Args(args) => args.user_id,
UserArgs::User { user,.. } => user.user_id,
UserArgs::Err(OsuError::NotFound) => {
let content = if ClaimNameValidator::is_valid(&name) {
format!("User `{name}` was not found, the name should be available to claim")
} else {
format!("`{name}` does not seem to be taken but it likely won't be accepted")
};
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
UserArgs::Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let args = [
GameMode::Osu,
GameMode::Taiko,
GameMode::Catch,
GameMode::Mania,
]
.map(|mode| UserArgs::user_id(user_id).mode(mode));
let user_fut = args
.into_iter()
.map(|args| ctx.redis().osu_user(args))
.collect::<FuturesUnordered<_>>()
.try_fold(None, |user: Option<ClaimNameUser>, next| match user {
Some(mut user) => {
let next_stats = next.stats();
match user.statistics {
Some(ref mut accum) => accum.playcount += next_stats.playcount(),
None => user.statistics = Some(next_stats.to_owned()),
}
let (next_highest_rank, next_last_visit) = match next {
RedisData::Original(next) => {
let rank = next.highest_rank;
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| count.start_date.with_time(Time::MIDNIGHT).assume_utc());
let last_visit = match (next.last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
RedisData::Archive(next) => {
let next: &Archived<User> = &next;
let rank = Map::<UserHighestRankRkyv>::deserialize_with(
&next.highest_rank,
&mut Infallible,
)
.unwrap();
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| {
DateRkyv::deserialize_with(&count.start_date, &mut Infallible)
.unwrap()
.with_time(Time::MIDNIGHT)
.assume_utc()
});
let last_visit = next.last_visit.as_ref().map(|time| {
DateTimeRkyv::deserialize_with(time, &mut Infallible).unwrap()
});
let last_visit = match (last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
};
match (user.highest_rank.as_mut(), next_highest_rank) {
(Some(curr), Some(next)) if curr.rank > next.rank => *curr = next,
(None, next @ Some(_)) => user.highest_rank = next,
_ => {}
}
match (user.last_visit.as_mut(), next_last_visit) {
(Some(curr), Some(next)) if *curr < next => *curr = next,
(None, next @ Some(_)) => user.last_visit = next,
_ => {}
}
future::ready(Ok(Some(user)))
}
None => future::ready(Ok(Some(ClaimNameUser::from(next)))),
});
let user = match user_fut.await {
Ok(user) => user.unwrap(),
Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let embed = ClaimNameEmbed::new(&user, &name).build();
let builder = MessageBuilder::new().embed(embed);
command.update(&ctx, builder).await?;
Ok(())
}
pub struct ClaimNameUser {
pub avatar_url: Box<str>,
pub country_code: CountryCode,
pub has_badges: bool,
pub has_ranked_mapsets: bool,
pub highest_rank: Option<UserHighestRank>,
pub last_visit: Option<OffsetDateTime>,
pub statistics: Option<UserStatistics>,
pub username: Username,
pub user_id: u32,
}
impl From<User> for ClaimNameUser {
#[inline]
fn from(user: User) -> Self {
Self {
avatar_url: user.avatar_url,
country_code: user.country_code,
has_badges:!user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: user.highest_rank,
last_visit: user.last_visit,
statistics: user.statistics.map(UserStatistics::from),
username: user.username,
user_id: user.user_id,
}
}
}
impl From<&ArchivedUser> for ClaimNameUser {
#[inline]
fn from(user: &ArchivedUser) -> Self {
Self {
avatar_url: user.avatar_url.as_ref().into(),
country_code: user.country_code.as_str().into(),
has_badges:!user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: Map::<UserHighestRankRkyv>::deserialize_with(
&user.highest_rank,
&mut Infallible,
)
.unwrap(),
last_visit: Map::<DateTimeRkyv>::deserialize_with(&user.last_visit, &mut Infallible)
.unwrap(),
statistics: user.statistics.as_ref().cloned(),
username: user.username.as_str().into(),
user_id: user.user_id,
}
}
}
impl From<RedisData<User>> for ClaimNameUser {
#[inline]
fn | (user: RedisData<User>) -> Self {
match user {
RedisData::Original(user) => Self::from(user),
RedisData::Archive(user) => Self::from(user.deref()),
}
}
}
pub struct ClaimNameValidator;
impl ClaimNameValidator {
pub fn is_valid(prefix: &str) -> bool {
!VALIDATOR
.get_or_init(|| {
let needles = [
"qfqqz",
"dppljf{",
"difbu",
"ojhhfs",
"mpmj",
"gvdl",
"ejmep",
"gbhhpu",
"dvou",
"tijhfupsb",
"qpso",
"cbodip",
"qfojt",
"wbhjob",
"qvttz",
"ejdl",
"dpdl",
"brvjmb",
"ijumfs",
"ibdl",
"tibwju",
"gsjfoepl",
]
.into_iter()
.map(String::from)
.map(|mut needle| {
unsafe { needle.as_bytes_mut() }
.iter_mut()
.for_each(|byte| *byte -= 1);
needle
});
AhoCorasickBuilder::new()
.ascii_case_insensitive(true)
.dfa(true)
.build_with_size(needles)
.unwrap()
})
.is_match(prefix)
}
}
static VALIDATOR: OnceCell<AhoCorasick<u16>> = OnceCell::new();
| from | identifier_name |
claim_name.rs | use std::{ops::Deref, sync::Arc};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder};
use bathbot_macros::SlashCommand;
use bathbot_model::{
rkyv_util::time::{DateRkyv, DateTimeRkyv},
rosu_v2::user::{ArchivedUser, User, UserHighestRank as UserHighestRankRkyv, UserStatistics},
};
use bathbot_util::{constants::OSU_API_ISSUE, MessageBuilder};
use eyre::{Report, Result};
use futures::{future, stream::FuturesUnordered, TryStreamExt};
use once_cell::sync::OnceCell;
use rkyv::{
with::{DeserializeWith, Map},
Archived, Infallible,
};
use rosu_v2::prelude::{CountryCode, GameMode, OsuError, UserHighestRank, Username};
use time::{OffsetDateTime, Time};
use twilight_interactions::command::{CommandModel, CreateCommand};
use crate::{
core::Context,
embeds::ClaimNameEmbed,
embeds::EmbedData,
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, InteractionCommandExt},
};
#[derive(CommandModel, CreateCommand, SlashCommand)]
#[command(
name = "claimname",
desc = "Check how much longer to wait until a name is up for grabs",
help = "If a player has not signed in for at least 6 months and has no plays,\
their username may be claimed.\n\
If that player does have any plays across all game modes, \
a [non-linear function](https://www.desmos.com/calculator/b89siyv9j8) is used to calculate \
how much extra time is added to those 6 months.\n\
This is to prevent people from stealing the usernames of active or recently retired players."
)]
pub struct ClaimName {
#[command(desc = "Specify a username")]
name: String,
}
async fn slash_claimname(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let ClaimName { name } = ClaimName::from_interaction(command.input_data())?;
let content = if name.chars().count() > 15 {
Some("Names can have at most 15 characters so your name won't be accepted".to_owned())
} else if let Some(c) = name
.chars()
.find(|c|!matches!(c, 'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '[' | ']' | '_' |''))
{
Some(format!(
"`{c}` is an invalid character for usernames so `{name}` won't be accepted"
))
} else if name.len() < 3 {
Some(format!(
"Names must be at least 3 characters long so `{name}` won't be accepted"
))
} else if name.contains('_') && name.contains(' ') {
Some(format!(
"Names may contains underscores or spaces but not both \
so `{name}` won't be accepted"
))
} else if name.starts_with(' ') || name.ends_with(' ') {
Some(format!(
"Names can't start or end with spaces so `{name}` won't be accepted"
))
} else {
None
};
if let Some(content) = content {
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
let user_id = match UserArgs::username(&ctx, &name).await {
UserArgs::Args(args) => args.user_id,
UserArgs::User { user,.. } => user.user_id,
UserArgs::Err(OsuError::NotFound) => {
let content = if ClaimNameValidator::is_valid(&name) {
format!("User `{name}` was not found, the name should be available to claim")
} else {
format!("`{name}` does not seem to be taken but it likely won't be accepted")
};
let builder = MessageBuilder::new().embed(content);
command.update(&ctx, builder).await?;
return Ok(());
}
UserArgs::Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let args = [
GameMode::Osu,
GameMode::Taiko,
GameMode::Catch,
GameMode::Mania,
]
.map(|mode| UserArgs::user_id(user_id).mode(mode));
let user_fut = args
.into_iter()
.map(|args| ctx.redis().osu_user(args))
.collect::<FuturesUnordered<_>>()
.try_fold(None, |user: Option<ClaimNameUser>, next| match user {
Some(mut user) => {
let next_stats = next.stats();
match user.statistics {
Some(ref mut accum) => accum.playcount += next_stats.playcount(),
None => user.statistics = Some(next_stats.to_owned()),
}
let (next_highest_rank, next_last_visit) = match next {
RedisData::Original(next) => {
let rank = next.highest_rank;
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| count.start_date.with_time(Time::MIDNIGHT).assume_utc());
let last_visit = match (next.last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
RedisData::Archive(next) => {
let next: &Archived<User> = &next;
let rank = Map::<UserHighestRankRkyv>::deserialize_with(
&next.highest_rank,
&mut Infallible,
)
.unwrap();
let last_playcount = next
.monthly_playcounts
.iter()
.rev()
.find(|count| count.count > 0)
.map(|count| {
DateRkyv::deserialize_with(&count.start_date, &mut Infallible)
.unwrap()
.with_time(Time::MIDNIGHT)
.assume_utc()
});
let last_visit = next.last_visit.as_ref().map(|time| {
DateTimeRkyv::deserialize_with(time, &mut Infallible).unwrap()
});
let last_visit = match (last_visit, last_playcount) {
(Some(a), Some(b)) => Some(a.max(b)),
(Some(a), _) | (_, Some(a)) => Some(a),
_ => None,
};
(rank, last_visit)
}
};
match (user.highest_rank.as_mut(), next_highest_rank) {
(Some(curr), Some(next)) if curr.rank > next.rank => *curr = next,
(None, next @ Some(_)) => user.highest_rank = next,
_ => {}
}
match (user.last_visit.as_mut(), next_last_visit) {
(Some(curr), Some(next)) if *curr < next => *curr = next,
(None, next @ Some(_)) => user.last_visit = next,
_ => {}
}
future::ready(Ok(Some(user)))
}
None => future::ready(Ok(Some(ClaimNameUser::from(next)))),
});
let user = match user_fut.await {
Ok(user) => user.unwrap(),
Err(err) => {
let _ = command.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("Failed to get user");
return Err(err);
}
};
let embed = ClaimNameEmbed::new(&user, &name).build();
let builder = MessageBuilder::new().embed(embed);
command.update(&ctx, builder).await?;
Ok(())
}
pub struct ClaimNameUser {
pub avatar_url: Box<str>,
pub country_code: CountryCode,
pub has_badges: bool,
pub has_ranked_mapsets: bool,
pub highest_rank: Option<UserHighestRank>,
pub last_visit: Option<OffsetDateTime>,
pub statistics: Option<UserStatistics>, | pub user_id: u32,
}
impl From<User> for ClaimNameUser {
#[inline]
fn from(user: User) -> Self {
Self {
avatar_url: user.avatar_url,
country_code: user.country_code,
has_badges:!user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: user.highest_rank,
last_visit: user.last_visit,
statistics: user.statistics.map(UserStatistics::from),
username: user.username,
user_id: user.user_id,
}
}
}
impl From<&ArchivedUser> for ClaimNameUser {
#[inline]
fn from(user: &ArchivedUser) -> Self {
Self {
avatar_url: user.avatar_url.as_ref().into(),
country_code: user.country_code.as_str().into(),
has_badges:!user.badges.is_empty(),
has_ranked_mapsets: user.ranked_mapset_count > 0,
highest_rank: Map::<UserHighestRankRkyv>::deserialize_with(
&user.highest_rank,
&mut Infallible,
)
.unwrap(),
last_visit: Map::<DateTimeRkyv>::deserialize_with(&user.last_visit, &mut Infallible)
.unwrap(),
statistics: user.statistics.as_ref().cloned(),
username: user.username.as_str().into(),
user_id: user.user_id,
}
}
}
impl From<RedisData<User>> for ClaimNameUser {
#[inline]
fn from(user: RedisData<User>) -> Self {
match user {
RedisData::Original(user) => Self::from(user),
RedisData::Archive(user) => Self::from(user.deref()),
}
}
}
pub struct ClaimNameValidator;
impl ClaimNameValidator {
pub fn is_valid(prefix: &str) -> bool {
!VALIDATOR
.get_or_init(|| {
let needles = [
"qfqqz",
"dppljf{",
"difbu",
"ojhhfs",
"mpmj",
"gvdl",
"ejmep",
"gbhhpu",
"dvou",
"tijhfupsb",
"qpso",
"cbodip",
"qfojt",
"wbhjob",
"qvttz",
"ejdl",
"dpdl",
"brvjmb",
"ijumfs",
"ibdl",
"tibwju",
"gsjfoepl",
]
.into_iter()
.map(String::from)
.map(|mut needle| {
unsafe { needle.as_bytes_mut() }
.iter_mut()
.for_each(|byte| *byte -= 1);
needle
});
AhoCorasickBuilder::new()
.ascii_case_insensitive(true)
.dfa(true)
.build_with_size(needles)
.unwrap()
})
.is_match(prefix)
}
}
static VALIDATOR: OnceCell<AhoCorasick<u16>> = OnceCell::new(); | pub username: Username, | random_line_split |
piston.rs | 4 v_Color;
uniform mat4 u_Projection;
uniform mat4 u_View;
uniform mat4 u_Model;
uniform vec4 u_Color;
uniform vec3 u_LightDirection;
void main() {
vec3 normal = normalize(vec3(u_Model * vec4(a_Normal, 0.0)));
float dot = max(dot(normal, u_LightDirection), 0.0);
v_Color = u_Color * (dot + 2) / 3;
gl_Position = u_Projection * u_View * u_Model * vec4(a_Pos, 1.0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32,..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
self.destination = dest;
}
pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running =!self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl|!pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if!player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if!cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
} |
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_ | self.player_pos = pos;
let front = pos.p + pos.dir; | random_line_split |
piston.rs | .0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32,..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
self.destination = dest;
}
pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running =!self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl|!pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if!player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if!cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
}
self.player_pos = pos;
let front = pos.p + pos.dir;
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_behind = 5f32;
let how_much_front = 3f32;
let (dbx, dby) = (dx * how_much_behind, dy * how_much_behind);
let (dfx, dfy) = (dx * how_much_front, dy * how_much_front);
self.camera_pos.set_destination(Point3::new(x - dbx, y - dby, 8.0));
self.camera_focus.set_destination(Point3::new(x + dfx, y + dfy, 0.0));
}
fn up | date_movement(& | identifier_name |
|
piston.rs | v_Color;
uniform mat4 u_Projection;
uniform mat4 u_View;
uniform mat4 u_Model;
uniform vec4 u_Color;
uniform vec3 u_LightDirection;
void main() {
vec3 normal = normalize(vec3(u_Model * vec4(a_Normal, 0.0)));
float dot = max(dot(normal, u_LightDirection), 0.0);
v_Color = u_Color * (dot + 2) / 3;
gl_Position = u_Projection * u_View * u_Model * vec4(a_Pos, 1.0);
}
"
};
static FRAGMENT_SRC: gfx::ShaderSource<'static> = shaders! {
GLSL_150: b"
#version 150 core
smooth in vec4 v_Color;
out vec4 o_Color;
void main() {
o_Color = v_Color;
}
"
};
struct Renderer<C : device::draw::CommandBuffer, D: gfx::Device<C>> {
graphics: gfx::Graphics<D, C>,
tile_batch: Batch,
creature_batch: Batch,
projection: Matrix4<f32>,
view: Matrix4<f32>,
frame: gfx::Frame,
cd: gfx::ClearData,
}
type Color = [f32,..4];
static BACKGROUND_COLOR: Color = [0.0f32, 0.0, 0.0, 1.0];
static PLAYER_COLOR : Color = [0.0f32, 0.0, 1.0, 1.0];
static WALL_COLOR : Color = [0.3f32, 0.2, 0.0, 1.0];
static GLASSWALL_COLOR : Color = [0.7f32, 0.7, 0.95, 1.0];
static SAND_COLOR : Color = [1.0f32, 1.0, 0.8, 1.0];
static FLOOR_COLOR : Color = [1.0f32, 0.9, 0.9, 1.0];
static SCOUT_COLOR : Color = [0.0f32, 0.8, 0.0, 1.0];
static GRUNT_COLOR : Color = [0.0f32, 0.6, 0.0, 1.0];
static HEAVY_COLOR : Color = [0.0f32, 0.4, 0.0, 1.0];
static WALL_HEIGHT : f32 = 0.3f32;
static HACK_PLAYER_KNOWS_ALL : bool = false;
static HACK_PLAYER_SEES_EVERYONE : bool = false;
fn grey_out(c : Color) -> Color {
let [r, g, b, a] = c;
[ (r+0.4f32)/4.0f32, (g + 0.4f32)/4.0f32, (b + 0.4f32)/4.0f32, a]
}
static BILLION : f32 = 1000000000f32;
static TAU : f32 = std::f32::consts::PI_2;
static TILE_OUTER_R : f32 = 1.0f32;
//static tile_inner_r : f32 = TILE_OUTER_R * 3f32.sqrt() / 2f32;
fn tile_inner_r() -> f32 {
TILE_OUTER_R * 3f32.sqrt() / 2f32
}
#[allow(dead_code)]
fn edge_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32
}
#[allow(dead_code)]
fn side_to_angle(i : uint) -> f32 {
i as f32 * TAU / 6.0f32 + TAU / 12f32
}
fn dir_to_angle(d : AbsoluteDirection) -> f32 {
-(d.to_uint() as f32 * TAU) / 6.0f32
}
type IndexVector = Vec<u8>;
type VertexVector = Vec<Vertex>;
pub fn load_hex(path : &str) -> (IndexVector, VertexVector) {
let obj = obj::load(&Path::new(path)).unwrap();
let mut index_data : Vec<u8> = vec!();
let mut vertex_data : Vec<Vertex> = vec!();
{
let mut indexer = genmesh::LruIndexer::new(16, |_, v| {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
| pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running =!self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl|!pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if!player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if!cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
}
self.player_pos = pos;
let front = pos.p + pos.dir;
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much | self.destination = dest;
}
| identifier_body |
piston.rs | | {
vertex_data.push(v);
});
for o in obj.object_iter() {
for g in o.group_iter() {
for i in g.indices().iter() {
match i {
&genmesh::PolyTri(poly) => {
for i in vec!(poly.x, poly.y, poly.z).iter() {
match i {
&(v, _, Some(n)) => {
let normal = obj.normal()[n];
let vertex = obj.position()[v];
let index = indexer.index(
Vertex {
pos: vertex,
normal: normal,
}
);
index_data.push(index as u8);
},
_ => { panic!() }
}
}
},
_ => { panic!() },
}
}
}
}
}
(index_data, vertex_data)
}
pub fn point_to_coordinate(p : Point) -> (f32, f32) {
(
p.x as f32 * TILE_OUTER_R * 3f32 / 2f32,
-((p.y * 2) as f32 + p.x as f32) * tile_inner_r()
)
}
impl<C : CommandBuffer, D: gfx::Device<C>> Renderer<C, D> {
fn new(mut device: D, frame: gfx::Frame) -> Renderer<C, D> {
let (w, h) = (frame.width, frame.height);
let (tile_index_data, tile_vertex_data) = load_hex("assets/hex.obj");
let (creature_index_data, creature_vertex_data) = load_hex("assets/creature.obj");
let tile_mesh = device.create_mesh(tile_vertex_data.as_slice());
let creature_mesh = device.create_mesh(creature_vertex_data.as_slice());
let tile_slice = device.create_buffer_static::<u8>(tile_index_data.as_slice())
.to_slice(gfx::TriangleList);
let creature_slice = device.create_buffer_static::<u8>(creature_index_data.as_slice())
.to_slice(gfx::TriangleList);
let program = device.link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
.unwrap();
let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true).multi_sample();
let mut graphics = gfx::Graphics::new(device);
let tile : Batch = graphics.make_batch(&program, &tile_mesh, tile_slice, &state).unwrap();
let creature : Batch = graphics.make_batch(&program, &creature_mesh, creature_slice, &state).unwrap();
let aspect = w as f32 / h as f32;
let proj = cgmath::perspective(cgmath::deg(45.0f32), aspect, 1.0, 100.0);
Renderer {
graphics: graphics,
frame: frame,
tile_batch : tile,
creature_batch : creature,
projection: proj,
view: proj,
cd: gfx::ClearData {
color: BACKGROUND_COLOR,
depth: 1.0,
stencil: 0,
},
}
}
fn render_params(&self, px : f32, py : f32, pz : f32, rotation : f32, color : Color) -> Params {
let mut model = Matrix4::identity();
model[3] = Vector4::new(px, py, pz, 1.0f32);
let rot = Matrix3::from_angle_z(rad(rotation)).to_matrix4();
//
//model = rot.rotate_vector(&model);
let model = model.mul_m(&rot);
Params {
projection: self.projection.into_fixed(),
view: self.view.into_fixed(),
color : color,
model: model.into_fixed(),
light: Vector3::unit_z().into_fixed(),
}
}
fn set_view(&mut self, view: &AffineMatrix3<f32>) {
self.view = view.mat;
}
/// Clear
fn clear(&mut self) {
self.graphics.clear(self.cd, gfx::COLOR | gfx::DEPTH, &self.frame);
}
fn end_frame(&mut self) {
self.graphics.end_frame();
}
fn render_batch(&mut self, batch : &Batch, params : &Params) {
self.graphics.draw(batch, params, &self.frame);
}
pub fn render_tile(&mut self, p : Point, c : Color, elevate : bool) {
let (px, py) = point_to_coordinate(p);
let params = self.render_params(px, py, if elevate {WALL_HEIGHT} else {0.0}, 0.0, c);
let batch = self.tile_batch;
self.render_batch(&batch, ¶ms);
}
pub fn render_creature(&mut self, pos : Position, c : Color) {
let (px, py) = point_to_coordinate(pos.p);
let params = self.render_params(px, py, 0.3, dir_to_angle(pos.dir), c);
let batch = self.creature_batch;
self.render_batch(&batch, ¶ms);
}
}
/// linearly interpolate between two values
fn mix<F : FloatMath> (x : F, y : F, a : F) -> F {
assert!(a >= zero());
assert!(a <= one());
y * a + x * (one::<F>() - a)
}
struct SmoothMovement<T> {
speed : f32,
destination: T,
pub current: T,
}
impl<V : cgmath::EuclideanVector<f32>, T : cgmath::Point<f32, V>> SmoothMovement<T> {
pub fn new(speed : f32) -> SmoothMovement<T> {
SmoothMovement {
speed: speed,
destination: cgmath::Point::origin(),
current: cgmath::Point::origin(),
}
}
pub fn update(&mut self, dt : f32) {
let d = self.destination.sub_p(&self.current);
self.current.add_self_v(&d.mul_s(dt * self.speed));
}
pub fn set_destination(&mut self, dest : T) {
self.destination = dest;
}
pub fn finish_immediately(&mut self) {
self.current = self.destination.clone();
}
}
pub struct PistonUI {
renderer : Renderer<GlCommandBuffer, GlDevice>,
render_controller : RenderController,
input_controller: InputController,
}
pub struct RenderController {
player_pos: Position,
camera_pos : SmoothMovement<Point3<f32>>,
camera_focus : SmoothMovement<Point3<f32>>,
}
pub struct InputController {
shift_pressed: bool,
alt_pressed: bool,
ctrl_pressed: bool,
is_running: bool,
action_queue: RingBuf<Action>,
}
impl InputController {
pub fn new() -> InputController {
InputController {
shift_pressed: false,
alt_pressed: false,
ctrl_pressed: false,
is_running: true,
action_queue: RingBuf::new(),
}
}
fn move_or_run(&self, dir : Direction) -> Action {
if self.is_running {
Run(dir)
} else {
Move(dir)
}
}
fn push_move_or_run(&mut self, dir : Direction) {
let a = self.move_or_run(dir);
self.action_queue.push_back(a)
}
fn push_turn(&mut self, dir : Direction) {
self.action_queue.push_back(Turn(dir))
}
fn push_melee(&mut self, dir : Direction) {
self.action_queue.push_back(Melee(dir))
}
fn push_wait(&mut self) {
self.action_queue.push_back(Wait)
}
pub fn push_input(&mut self, i : InputEvent) {
match i {
Press(Keyboard(k)) => {
match (k, self.shift_pressed, self.ctrl_pressed) {
(key::LShift, _, _) => self.shift_pressed = true,
(key::RShift, _, _) => self.shift_pressed = true,
(key::LAlt, _, _) => self.alt_pressed = true,
(key::RAlt, _, _) => self.alt_pressed = true,
(key::LCtrl, _, _) => self.ctrl_pressed = true,
(key::RCtrl, _, _) => self.ctrl_pressed = true,
(key::R, _, _) => self.is_running =!self.is_running,
(key::K, _, false) => self.push_move_or_run(Forward),
(key::L, true, false) => self.push_move_or_run(Right),
(key::H, true, false) => self.push_move_or_run(Left),
(key::J, _, false) => self.push_move_or_run(Backward),
(key::L, false, false) => self.push_turn(Right),
(key::H, false, false) => self.push_turn(Left),
(key::K, _, true) => self.push_melee(Forward),
(key::L, _, true) => self.push_melee(Right),
(key::H, _, true) => self.push_melee(Left),
(key::Period, _, _) => self.push_wait(),
_ => { }
}
},
Release(Keyboard(k)) => {
match k {
key::LShift|key::RShift => {
self.shift_pressed = false
},
key::LAlt|key::RAlt => {
self.alt_pressed = false
},
key::LCtrl|key::RCtrl=> {
self.ctrl_pressed = false
},
_ => {}
}
},
_ => {}
}
}
pub fn pop_action(&mut self) -> Option<Action> {
self.action_queue.pop_front()
}
}
impl RenderController {
fn new() -> RenderController {
let cp = SmoothMovement::new(1.0f32);
let cf = SmoothMovement::new(2.0f32);
RenderController {
player_pos: Position::new(Point::new(0,0), North),
camera_pos: cp,
camera_focus: cf,
}
}
pub fn render_map(
&self,
renderer : &mut Renderer<GlCommandBuffer, GlDevice>, game : &GameState) {
let &GameState {
ref player,
..
} = game;
let player = player.as_ref().and_then(|pl| pl.try_borrow());
game.map.for_each_point(|ap| {
if player.as_ref().map_or(true, |pl| pl.knows(ap) || HACK_PLAYER_KNOWS_ALL) {
let tiletype = game.map.at(ap).tiletype;
let (color, elevate) = match tiletype {
Wall => (WALL_COLOR, true),
GlassWall => (GLASSWALL_COLOR, true),
Floor => (FLOOR_COLOR, false),
Sand => (SAND_COLOR, false),
};
let color = if player.as_ref().map_or(
false, |pl|!pl.sees(ap) && pl.is_alive()
) {
grey_out(color)
} else {
color
};
renderer.render_tile(ap, color, elevate);
};
});
for creature in game.creatures_iter() {
let creature = creature.borrow();
let ap = creature.pos().p;
if!player.as_ref().map_or(
true, |pl| pl.sees(ap) || HACK_PLAYER_SEES_EVERYONE
) {
continue;
}
match self.creature_color(&*creature) {
Some(color) => renderer.render_creature(*creature.pos(), color),
None => {}
}
};
}
fn creature_color(&self, cr : &Creature) -> Option<Color> {
let now_ns = time::precise_time_ns();
let duration_s = 0.8f32;
let base_color = if cr.is_player() {
PLAYER_COLOR
} else {
match cr.race() {
Scout => SCOUT_COLOR,
Grunt => GRUNT_COLOR,
Heavy => HEAVY_COLOR,
Human => panic!(),
}
};
let color = base_color;
let since_s = (now_ns - cr.was_attacked_ns()) as f32 / BILLION;
let color = if since_s < duration_s {
let f = since_s / duration_s;
[
mix(1f32, color[0], f),
mix(0f32, color[1], f),
mix(0f32, color[2], f),
color[3],
]
} else {
color
};
let color = if!cr.is_alive() {
let since_s = (now_ns - cr.death_ns()) as f32 / BILLION;
let f = since_s / duration_s;
if f < 1.0 {
Some([
mix(color[0], FLOOR_COLOR[0], f),
mix(color[1], FLOOR_COLOR[1], f),
mix(color[2], FLOOR_COLOR[2], f),
color[3],
])
} else {
None
}
} else {
Some(color)
};
color
}
fn move_camera_to_destination(&mut self) {
self.camera_pos.finish_immediately();
self.camera_focus.finish_immediately();
}
fn set_player_pos(&mut self, pl: &Creature) {
let pos = *pl.pos();
if self.player_pos == pos {
return;
}
self.player_pos = pos;
let front = pos.p + pos.dir;
let (fx, fy) = point_to_coordinate(front);
let (x, y) = point_to_coordinate(pos.p);
let (dx, dy) = (fx - x, fy - y);
let how_much_behind = 5f32;
let how_much_front = 3f32;
let (dbx, dby) = (dx * how_much_behind, dy * how_much_behind);
let (dfx, dfy) = (dx * how_much_front, dy * how_much_front);
self.camera_pos.set_destination(Point3::new(x - dbx, y - dby, 8.0));
self.camera_focus.set_destination(Point3::new(x + dfx, y + dfy, 0.0));
}
fn update_movement(&mut self, dt : f32) {
self.camera_pos.update(dt);
self.camera_focus.update(dt);
}
fn update_camera(&mut self, renderer : &mut Renderer<GlCommandBuffer, GlDevice>) {
let view : AffineMatrix3<f32> = Transform::look_at(
&self.camera_pos.current,
&self.camera_focus.current,
&Vector3::unit_z(),
);
renderer.set_view(&view);
}
}
impl PistonUI {
pub fn new() -> (PistonUI, Window) {
let width = 800;
let height = 600;
let window = Window::new(
shader_version::opengl::OpenGL_3_2,
WindowSettings {
title: "Rustyhex".to_string(),
size: [width, height],
fullscreen: false,
exit_on_esc: true,
samples: 4,
}
);
let frame = gfx::Frame::new(width as u16, height as u16);
let device = gfx::GlDevice::new(|s| window.window.get_proc_address(s));
let renderer = Renderer::new(device, frame);
(PistonUI {
render_controller: RenderController::new(),
input_controller: InputController::new(),
renderer: renderer,
}, window)
}
fn game_update(&mut self, game : &mut GameState) {
loop {
if game.tick() {
match self.input_controller.pop_action() {
Some(action) => {
game.player.as_ref().map(|pl| pl.borrow_mut().action_set(action));
},
_ => {
break;
}
};
} else {
break;
}
}
match game.player {
Some(ref pl) => self.render_controller.set_player_pos(&*pl.borrow()),
None => {} | conditional_block |
||
context.rs | (&self) -> &Context {
self.0.as_ref()
}
}
impl std::borrow::Borrow<Context> for CtxRef {
fn borrow(&self) -> &Context {
self.0.borrow()
}
}
impl std::cmp::PartialEq for CtxRef {
fn eq(&self, other: &CtxRef) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl Default for CtxRef {
fn default() -> Self {
Self(Arc::new(Context {
// Start with painting an extra frame to compensate for some widgets
// that take two frames before they "settle":
repaint_requests: AtomicU32::new(1),
..Context::default()
}))
}
}
impl CtxRef {
/// Call at the start of every frame. Match with a call to [`Context::end_frame`].
///
/// This will modify the internal reference to point to a new generation of [`Context`].
/// Any old clones of this [`CtxRef`] will refer to the old [`Context`], which will not get new input.
///
/// Put your widgets into a [`SidePanel`], [`TopBottomPanel`], [`CentralPanel`], [`Window`] or [`Area`].
pub fn begin_frame(&mut self, new_input: RawInput) {
let mut self_: Context = (*self.0).clone();
self_.begin_frame_mut(new_input);
*self = Self(Arc::new(self_));
}
// ---------------------------------------------------------------------
/// If the given [`Id`] is not unique, an error will be printed at the given position.
/// Call this for [`Id`]:s that need interaction or persistence.
pub(crate) fn register_interaction_id(&self, id: Id, new_rect: Rect) {
let prev_rect = self.frame_state().used_ids.insert(id, new_rect);
if let Some(prev_rect) = prev_rect {
// it is ok to reuse the same ID for e.g. a frame around a widget,
// or to check for interaction with the same widget twice:
if prev_rect.expand(0.1).contains_rect(new_rect)
|| new_rect.expand(0.1).contains_rect(prev_rect)
{
return;
}
let show_error = |pos: Pos2, text: String| {
let painter = self.debug_painter();
let rect = painter.error(pos, text);
if let Some(pointer_pos) = self.input.pointer.hover_pos() {
if rect.contains(pointer_pos) {
painter.error(
rect.left_bottom() + vec2(2.0, 4.0),
"ID clashes happens when things like Windows or CollapsingHeaders share names,\n\
or when things like ScrollAreas and Resize areas aren't given unique id_source:s.",
);
}
}
};
let id_str = id.short_debug_format();
if prev_rect.min.distance(new_rect.min) < 4.0 {
show_error(new_rect.min, format!("Double use of ID {}", id_str));
} else {
show_error(prev_rect.min, format!("First use of ID {}", id_str));
show_error(new_rect.min, format!("Second use of ID {}", id_str));
}
}
}
// ---------------------------------------------------------------------
/// Use `ui.interact` instead
#[allow(clippy::too_many_arguments)]
pub(crate) fn interact(
&self,
clip_rect: Rect,
item_spacing: Vec2,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
) -> Response {
let gap = 0.5; // Just to make sure we don't accidentally hover two things at once (a small eps should be sufficient).
// Make it easier to click things:
let interact_rect = rect.expand2(
(0.5 * item_spacing - Vec2::splat(gap))
.at_least(Vec2::splat(0.0))
.at_most(Vec2::splat(5.0)),
); // make it easier to click
let hovered = self.rect_contains_pointer(layer_id, clip_rect.intersect(interact_rect));
self.interact_with_hovered(layer_id, id, rect, sense, enabled, hovered)
}
/// You specify if a thing is hovered, and the function gives a `Response`.
pub(crate) fn interact_with_hovered(
&self,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
hovered: bool,
) -> Response {
let hovered = hovered && enabled; // can't even hover disabled widgets
let mut response = Response {
ctx: self.clone(),
layer_id,
id,
rect,
sense,
enabled,
hovered,
clicked: Default::default(),
double_clicked: Default::default(),
dragged: false,
drag_released: false,
is_pointer_button_down_on: false,
interact_pointer_pos: None,
changed: false, // must be set by the widget itself
};
if!enabled ||!sense.focusable ||!layer_id.allow_interaction() {
// Not interested or allowed input:
self.memory().surrender_focus(id);
return response;
}
// We only want to focus labels if the screen reader is on.
let interested_in_focus =
sense.interactive() || sense.focusable && self.memory().options.screen_reader;
if interested_in_focus {
self.memory().interested_in_focus(id);
}
if sense.click
&& response.has_focus()
&& (self.input().key_pressed(Key::Space) || self.input().key_pressed(Key::Enter))
{
// Space/enter works like a primary click for e.g. selected buttons
response.clicked[PointerButton::Primary as usize] = true;
}
self.register_interaction_id(id, rect);
if sense.click || sense.drag {
let mut memory = self.memory();
memory.interaction.click_interest |= hovered && sense.click;
memory.interaction.drag_interest |= hovered && sense.drag;
response.dragged = memory.interaction.drag_id == Some(id);
response.is_pointer_button_down_on =
memory.interaction.click_id == Some(id) || response.dragged;
for pointer_event in &self.input.pointer.pointer_events {
match pointer_event {
PointerEvent::Moved(_) => {}
PointerEvent::Pressed(_) => {
if hovered {
if sense.click && memory.interaction.click_id.is_none() {
// potential start of a click
memory.interaction.click_id = Some(id);
response.is_pointer_button_down_on = true;
}
// HACK: windows have low priority on dragging.
// This is so that if you drag a slider in a window,
// the slider will steal the drag away from the window.
// This is needed because we do window interaction first (to prevent frame delay),
// and then do content layout.
if sense.drag
&& (memory.interaction.drag_id.is_none()
|| memory.interaction.drag_is_window)
{
// potential start of a drag
memory.interaction.drag_id = Some(id);
memory.interaction.drag_is_window = false;
memory.window_interaction = None; // HACK: stop moving windows (if any)
response.is_pointer_button_down_on = true;
response.dragged = true;
}
}
}
PointerEvent::Released(click) => {
response.drag_released = response.dragged;
response.dragged = false;
if hovered && response.is_pointer_button_down_on {
if let Some(click) = click {
let clicked = hovered && response.is_pointer_button_down_on;
response.clicked[click.button as usize] = clicked;
response.double_clicked[click.button as usize] =
clicked && click.is_double();
}
}
}
}
}
}
if response.is_pointer_button_down_on {
response.interact_pointer_pos = self.input().pointer.interact_pos();
}
if self.input.pointer.any_down() {
response.hovered &= response.is_pointer_button_down_on; // we don't hover widgets while interacting with *other* widgets
}
if response.has_focus() && response.clicked_elsewhere() {
self.memory().surrender_focus(id);
}
response
}
/// Get a full-screen painter for a new or existing layer
pub fn layer_painter(&self, layer_id: LayerId) -> Painter {
Painter::new(self.clone(), layer_id, self.input.screen_rect())
}
/// Paint on top of everything else
pub fn debug_painter(&self) -> Painter {
Self::layer_painter(self, LayerId::debug())
}
}
// ----------------------------------------------------------------------------
/// This is the first thing you need when working with egui. Create using [`CtxRef`].
///
/// Contains the [`InputState`], [`Memory`], [`Output`], and more.
///
/// Your handle to Egui.
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
/// Multi-threaded access to a [`Context`] is behind the feature flag `multi_threaded`.
/// Normally you'd always do all ui work on one thread, or perhaps use multiple contexts,
/// but if you really want to access the same Context from multiple threads, it *SHOULD* be fine,
/// but you are likely the first person to try it.
#[derive(Default)]
pub struct Context {
// We clone the Context each frame so we can set a new `input`.
// This is so we can avoid a mutex lock to access the `InputState`.
// This means everything else needs to be behind an Arc.
// We can probably come up with a nicer design.
//
/// None until first call to `begin_frame`.
fonts: Option<Arc<Fonts>>,
memory: Arc<Mutex<Memory>>,
animation_manager: Arc<Mutex<AnimationManager>>,
input: InputState,
/// State that is collected during a frame and then cleared
frame_state: Arc<Mutex<FrameState>>,
// The output of a frame:
graphics: Arc<Mutex<GraphicLayers>>,
output: Arc<Mutex<Output>>,
paint_stats: Arc<Mutex<PaintStats>>,
/// While positive, keep requesting repaints. Decrement at the end of each frame.
repaint_requests: AtomicU32,
}
impl Clone for Context {
fn clone(&self) -> Self {
Context {
fonts: self.fonts.clone(),
memory: self.memory.clone(),
animation_manager: self.animation_manager.clone(),
input: self.input.clone(),
frame_state: self.frame_state.clone(),
graphics: self.graphics.clone(),
output: self.output.clone(),
paint_stats: self.paint_stats.clone(),
repaint_requests: self.repaint_requests.load(SeqCst).into(),
}
}
}
impl Context {
/// How much space is still available after panels has been added.
/// This is the "background" area, what egui doesn't cover with panels (but may cover with windows).
/// This is also the area to which windows are constrained.
pub fn available_rect(&self) -> Rect {
self.frame_state.lock().available_rect()
}
/// Stores all the egui state.
/// If you want to store/restore egui, serialize this.
pub fn memory(&self) -> MutexGuard<'_, Memory> {
self.memory.lock()
}
pub(crate) fn graphics(&self) -> MutexGuard<'_, GraphicLayers> {
self.graphics.lock()
}
/// What egui outputs each frame.
pub fn output(&self) -> MutexGuard<'_, Output> {
self.output.lock()
}
pub(crate) fn frame_state(&self) -> MutexGuard<'_, FrameState> {
self.frame_state.lock()
}
/// Call this if there is need to repaint the UI, i.e. if you are showing an animation.
/// If this is called at least once in a frame, then there will be another frame right after this.
/// Call as many times as you wish, only one repaint will be issued.
pub fn request_repaint(&self) {
// request two frames of repaint, just to cover some corner cases (frame delays):
let times_to_repaint = 2;
self.repaint_requests.store(times_to_repaint, SeqCst);
}
#[inline(always)]
pub fn input(&self) -> &InputState {
&self.input
}
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn fonts(&self) -> &Fonts {
&*self
.fonts
.as_ref()
.expect("No fonts available until first call to CtxRef::begin_frame()")
}
/// The egui texture, containing font characters etc.
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn texture(&self) -> Arc<epaint::Texture> {
self.fonts().texture()
}
/// Tell `egui` which fonts to use.
///
/// The default `egui` fonts only support latin and cyrillic alphabets,
/// but you can call this to install additional fonts that support e.g. korean characters.
///
/// The new fonts will become active at the start of the next frame.
pub fn set_fonts(&self, font_definitions: FontDefinitions) {
if let Some(current_fonts) = &self.fonts {
// NOTE: this comparison is expensive since it checks TTF data for equality
if current_fonts.definitions() == &font_definitions {
return; // no change - save us from reloading font textures
}
}
self.memory().new_font_definitions = Some(font_definitions);
}
/// The [`Style`] used by all subsequent windows, panels etc.
pub fn style(&self) -> Arc<Style> {
self.memory().options.style.clone()
}
/// The [`Style`] used by all new windows, panels etc.
///
/// You can also use [`Ui::style_mut`] to change the style of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// let mut style: egui::Style = (*ctx.style()).clone();
/// style.spacing.item_spacing = egui::vec2(10.0, 20.0);
/// ctx.set_style(style);
/// ```
pub fn set_style(&self, style: impl Into<Arc<Style>>) {
self.memory().options.style = style.into();
}
/// The [`Visuals`] used by all subsequent windows, panels etc.
///
/// You can also use [`Ui::visuals_mut`] to change the visuals of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// ctx.set_visuals(egui::Visuals::light()); // Switch to light mode
/// ```
pub fn set_visuals(&self, visuals: crate::Visuals) {
std::sync::Arc::make_mut(&mut self.memory().options.style).visuals = visuals;
}
/// The number of physical pixels for each logical point.
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
self.input.pixels_per_point()
}
/// Set the number of physical pixels for each logical point.
/// Will become active at the start of the next frame.
///
/// Note that this may be overwritten by input from the integration via [`RawInput::pixels_per_point`].
/// For instance, when using `egui_web` the browsers native zoom level will always be used.
pub fn set_pixels_per_point(&self, pixels_per_point: f32) {
self.memory().new_pixels_per_point = Some(pixels_per_point);
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_to_pixel(&self, point: f32) -> f32 {
let pixels_per_point = self.pixels_per_point();
(point * pixels_per_point).round() / pixels_per_point
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_pos_to_pixels(&self, pos: Pos2) -> Pos2 {
pos2(self.round_to_pixel(pos.x), self.round_to_pixel(pos.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_vec_to_pixels(&self, vec: Vec2) -> Vec2 {
vec2(self.round_to_pixel(vec.x), self.round_to_pixel(vec.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_rect_to_pixels(&self, rect: Rect) -> Rect {
Rect {
min: self.round_pos_to_pixels(rect.min),
max: self.round_pos_to_pixels(rect.max),
}
}
// ---------------------------------------------------------------------
/// Constrain the position of a window/area so it fits within the provided boundary.
///
/// If area is `None`, will constrain to [`Self::available_rect`].
pub(crate) fn constrain_window_rect_to_area(&self, window: Rect, area: Option<Rect>) -> Rect {
let mut area = area.unwrap_or_else(|| self.available_rect());
if window.width() > area.width() {
// Allow overlapping side bars.
// This is important for small screens, e.g. mobiles running the web demo.
area.max.x = self.input().screen_rect().max.x;
area.min.x = self.input().screen_rect().min.x;
}
if window.height() > area.height() {
// Allow overlapping top/bottom bars:
area.max.y = self.input().screen_rect().max.y;
area.min.y = self.input().screen_rect().min.y;
}
let mut pos = window.min;
// Constrain to screen, unless window is too large to fit:
let margin_x = (window.width() - area.width()).at_least(0.0);
let margin_y = (window.height() - area.height()).at_least(0.0);
pos.x = pos.x.at_most(area.right() + margin_x - window.width()); // move left if needed
pos.x = pos.x.at_least(area.left() - margin_x); // move right if needed
pos.y = pos.y.at_most(area.bottom() + margin_y - window.height()); // move right if needed
pos.y = pos.y.at_least(area.top() - margin_y); // move down if needed
pos = self.round_pos_to_pixels(pos);
Rect::from_min_size(pos, window.size())
}
// ---------------------------------------------------------------------
fn begin_frame_mut(&mut self, new_raw_input: RawInput) {
self.memory().begin_frame(&self.input, &new_raw_input);
let mut input = std::mem::take(&mut self.input);
if let Some(new_pixels_per_point) = self.memory().new_pixels_per_point.take() {
input.pixels_per_point = new_pixels_per_point;
| as_ref | identifier_name |
|
context.rs | }
impl Default for CtxRef {
fn default() -> Self {
Self(Arc::new(Context {
// Start with painting an extra frame to compensate for some widgets
// that take two frames before they "settle":
repaint_requests: AtomicU32::new(1),
..Context::default()
}))
}
}
impl CtxRef {
/// Call at the start of every frame. Match with a call to [`Context::end_frame`].
///
/// This will modify the internal reference to point to a new generation of [`Context`].
/// Any old clones of this [`CtxRef`] will refer to the old [`Context`], which will not get new input.
///
/// Put your widgets into a [`SidePanel`], [`TopBottomPanel`], [`CentralPanel`], [`Window`] or [`Area`].
pub fn begin_frame(&mut self, new_input: RawInput) {
let mut self_: Context = (*self.0).clone();
self_.begin_frame_mut(new_input);
*self = Self(Arc::new(self_));
}
// ---------------------------------------------------------------------
/// If the given [`Id`] is not unique, an error will be printed at the given position.
/// Call this for [`Id`]:s that need interaction or persistence.
pub(crate) fn register_interaction_id(&self, id: Id, new_rect: Rect) {
let prev_rect = self.frame_state().used_ids.insert(id, new_rect);
if let Some(prev_rect) = prev_rect {
// it is ok to reuse the same ID for e.g. a frame around a widget,
// or to check for interaction with the same widget twice:
if prev_rect.expand(0.1).contains_rect(new_rect)
|| new_rect.expand(0.1).contains_rect(prev_rect)
{
return;
}
let show_error = |pos: Pos2, text: String| {
let painter = self.debug_painter();
let rect = painter.error(pos, text);
if let Some(pointer_pos) = self.input.pointer.hover_pos() {
if rect.contains(pointer_pos) {
painter.error(
rect.left_bottom() + vec2(2.0, 4.0),
"ID clashes happens when things like Windows or CollapsingHeaders share names,\n\
or when things like ScrollAreas and Resize areas aren't given unique id_source:s.",
);
}
}
};
let id_str = id.short_debug_format();
if prev_rect.min.distance(new_rect.min) < 4.0 {
show_error(new_rect.min, format!("Double use of ID {}", id_str));
} else {
show_error(prev_rect.min, format!("First use of ID {}", id_str));
show_error(new_rect.min, format!("Second use of ID {}", id_str));
}
}
}
// ---------------------------------------------------------------------
/// Use `ui.interact` instead
#[allow(clippy::too_many_arguments)]
pub(crate) fn interact(
&self,
clip_rect: Rect,
item_spacing: Vec2,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
) -> Response {
let gap = 0.5; // Just to make sure we don't accidentally hover two things at once (a small eps should be sufficient).
// Make it easier to click things:
let interact_rect = rect.expand2(
(0.5 * item_spacing - Vec2::splat(gap))
.at_least(Vec2::splat(0.0))
.at_most(Vec2::splat(5.0)),
); // make it easier to click
let hovered = self.rect_contains_pointer(layer_id, clip_rect.intersect(interact_rect));
self.interact_with_hovered(layer_id, id, rect, sense, enabled, hovered)
}
/// You specify if a thing is hovered, and the function gives a `Response`.
pub(crate) fn interact_with_hovered(
&self,
layer_id: LayerId,
id: Id,
rect: Rect,
sense: Sense,
enabled: bool,
hovered: bool,
) -> Response {
let hovered = hovered && enabled; // can't even hover disabled widgets
let mut response = Response {
ctx: self.clone(),
layer_id,
id,
rect,
sense,
enabled,
hovered,
clicked: Default::default(),
double_clicked: Default::default(),
dragged: false,
drag_released: false,
is_pointer_button_down_on: false,
interact_pointer_pos: None,
changed: false, // must be set by the widget itself
};
if!enabled ||!sense.focusable ||!layer_id.allow_interaction() {
// Not interested or allowed input:
self.memory().surrender_focus(id);
return response;
}
// We only want to focus labels if the screen reader is on.
let interested_in_focus =
sense.interactive() || sense.focusable && self.memory().options.screen_reader;
if interested_in_focus {
self.memory().interested_in_focus(id);
}
if sense.click
&& response.has_focus()
&& (self.input().key_pressed(Key::Space) || self.input().key_pressed(Key::Enter))
{
// Space/enter works like a primary click for e.g. selected buttons
response.clicked[PointerButton::Primary as usize] = true;
}
self.register_interaction_id(id, rect);
if sense.click || sense.drag {
let mut memory = self.memory();
memory.interaction.click_interest |= hovered && sense.click;
memory.interaction.drag_interest |= hovered && sense.drag;
response.dragged = memory.interaction.drag_id == Some(id);
response.is_pointer_button_down_on =
memory.interaction.click_id == Some(id) || response.dragged;
for pointer_event in &self.input.pointer.pointer_events {
match pointer_event {
PointerEvent::Moved(_) => {}
PointerEvent::Pressed(_) => {
if hovered {
if sense.click && memory.interaction.click_id.is_none() {
// potential start of a click
memory.interaction.click_id = Some(id);
response.is_pointer_button_down_on = true;
}
// HACK: windows have low priority on dragging.
// This is so that if you drag a slider in a window,
// the slider will steal the drag away from the window.
// This is needed because we do window interaction first (to prevent frame delay),
// and then do content layout.
if sense.drag
&& (memory.interaction.drag_id.is_none()
|| memory.interaction.drag_is_window)
{
// potential start of a drag
memory.interaction.drag_id = Some(id);
memory.interaction.drag_is_window = false;
memory.window_interaction = None; // HACK: stop moving windows (if any)
response.is_pointer_button_down_on = true;
response.dragged = true;
}
}
}
PointerEvent::Released(click) => {
response.drag_released = response.dragged;
response.dragged = false;
if hovered && response.is_pointer_button_down_on {
if let Some(click) = click {
let clicked = hovered && response.is_pointer_button_down_on;
response.clicked[click.button as usize] = clicked;
response.double_clicked[click.button as usize] =
clicked && click.is_double();
}
}
}
}
}
}
if response.is_pointer_button_down_on {
response.interact_pointer_pos = self.input().pointer.interact_pos();
}
if self.input.pointer.any_down() {
response.hovered &= response.is_pointer_button_down_on; // we don't hover widgets while interacting with *other* widgets
}
if response.has_focus() && response.clicked_elsewhere() {
self.memory().surrender_focus(id);
}
response
}
/// Get a full-screen painter for a new or existing layer
pub fn layer_painter(&self, layer_id: LayerId) -> Painter {
Painter::new(self.clone(), layer_id, self.input.screen_rect())
}
/// Paint on top of everything else
pub fn debug_painter(&self) -> Painter {
Self::layer_painter(self, LayerId::debug())
}
}
// ----------------------------------------------------------------------------
/// This is the first thing you need when working with egui. Create using [`CtxRef`].
///
/// Contains the [`InputState`], [`Memory`], [`Output`], and more.
///
/// Your handle to Egui.
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
/// Multi-threaded access to a [`Context`] is behind the feature flag `multi_threaded`.
/// Normally you'd always do all ui work on one thread, or perhaps use multiple contexts,
/// but if you really want to access the same Context from multiple threads, it *SHOULD* be fine,
/// but you are likely the first person to try it.
#[derive(Default)]
pub struct Context {
// We clone the Context each frame so we can set a new `input`.
// This is so we can avoid a mutex lock to access the `InputState`.
// This means everything else needs to be behind an Arc.
// We can probably come up with a nicer design.
//
/// None until first call to `begin_frame`.
fonts: Option<Arc<Fonts>>,
memory: Arc<Mutex<Memory>>,
animation_manager: Arc<Mutex<AnimationManager>>,
input: InputState,
/// State that is collected during a frame and then cleared
frame_state: Arc<Mutex<FrameState>>,
// The output of a frame:
graphics: Arc<Mutex<GraphicLayers>>,
output: Arc<Mutex<Output>>,
paint_stats: Arc<Mutex<PaintStats>>,
/// While positive, keep requesting repaints. Decrement at the end of each frame.
repaint_requests: AtomicU32,
}
impl Clone for Context {
fn clone(&self) -> Self {
Context {
fonts: self.fonts.clone(),
memory: self.memory.clone(),
animation_manager: self.animation_manager.clone(),
input: self.input.clone(),
frame_state: self.frame_state.clone(),
graphics: self.graphics.clone(),
output: self.output.clone(),
paint_stats: self.paint_stats.clone(),
repaint_requests: self.repaint_requests.load(SeqCst).into(),
}
}
}
impl Context {
/// How much space is still available after panels has been added.
/// This is the "background" area, what egui doesn't cover with panels (but may cover with windows).
/// This is also the area to which windows are constrained.
pub fn available_rect(&self) -> Rect {
self.frame_state.lock().available_rect()
}
/// Stores all the egui state.
/// If you want to store/restore egui, serialize this.
pub fn memory(&self) -> MutexGuard<'_, Memory> {
self.memory.lock()
}
pub(crate) fn graphics(&self) -> MutexGuard<'_, GraphicLayers> {
self.graphics.lock()
}
/// What egui outputs each frame.
pub fn output(&self) -> MutexGuard<'_, Output> {
self.output.lock()
}
pub(crate) fn frame_state(&self) -> MutexGuard<'_, FrameState> {
self.frame_state.lock()
}
/// Call this if there is need to repaint the UI, i.e. if you are showing an animation.
/// If this is called at least once in a frame, then there will be another frame right after this.
/// Call as many times as you wish, only one repaint will be issued.
pub fn request_repaint(&self) {
// request two frames of repaint, just to cover some corner cases (frame delays):
let times_to_repaint = 2;
self.repaint_requests.store(times_to_repaint, SeqCst);
}
#[inline(always)]
pub fn input(&self) -> &InputState {
&self.input
}
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn fonts(&self) -> &Fonts {
&*self
.fonts
.as_ref()
.expect("No fonts available until first call to CtxRef::begin_frame()")
}
/// The egui texture, containing font characters etc.
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
pub fn texture(&self) -> Arc<epaint::Texture> {
self.fonts().texture()
}
/// Tell `egui` which fonts to use.
///
/// The default `egui` fonts only support latin and cyrillic alphabets,
/// but you can call this to install additional fonts that support e.g. korean characters.
///
/// The new fonts will become active at the start of the next frame.
pub fn set_fonts(&self, font_definitions: FontDefinitions) {
if let Some(current_fonts) = &self.fonts {
// NOTE: this comparison is expensive since it checks TTF data for equality
if current_fonts.definitions() == &font_definitions {
return; // no change - save us from reloading font textures
}
}
self.memory().new_font_definitions = Some(font_definitions);
}
/// The [`Style`] used by all subsequent windows, panels etc.
pub fn style(&self) -> Arc<Style> {
self.memory().options.style.clone()
}
/// The [`Style`] used by all new windows, panels etc.
///
/// You can also use [`Ui::style_mut`] to change the style of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// let mut style: egui::Style = (*ctx.style()).clone();
/// style.spacing.item_spacing = egui::vec2(10.0, 20.0);
/// ctx.set_style(style);
/// ```
pub fn set_style(&self, style: impl Into<Arc<Style>>) {
self.memory().options.style = style.into();
}
/// The [`Visuals`] used by all subsequent windows, panels etc.
///
/// You can also use [`Ui::visuals_mut`] to change the visuals of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// ctx.set_visuals(egui::Visuals::light()); // Switch to light mode
/// ```
pub fn set_visuals(&self, visuals: crate::Visuals) {
std::sync::Arc::make_mut(&mut self.memory().options.style).visuals = visuals;
}
/// The number of physical pixels for each logical point.
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
self.input.pixels_per_point()
}
/// Set the number of physical pixels for each logical point.
/// Will become active at the start of the next frame.
///
/// Note that this may be overwritten by input from the integration via [`RawInput::pixels_per_point`].
/// For instance, when using `egui_web` the browsers native zoom level will always be used.
pub fn set_pixels_per_point(&self, pixels_per_point: f32) {
self.memory().new_pixels_per_point = Some(pixels_per_point);
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_to_pixel(&self, point: f32) -> f32 {
let pixels_per_point = self.pixels_per_point();
(point * pixels_per_point).round() / pixels_per_point
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_pos_to_pixels(&self, pos: Pos2) -> Pos2 {
pos2(self.round_to_pixel(pos.x), self.round_to_pixel(pos.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_vec_to_pixels(&self, vec: Vec2) -> Vec2 {
vec2(self.round_to_pixel(vec.x), self.round_to_pixel(vec.y))
}
/// Useful for pixel-perfect rendering
pub(crate) fn round_rect_to_pixels(&self, rect: Rect) -> Rect {
Rect {
min: self.round_pos_to_pixels(rect.min),
max: self.round_pos_to_pixels(rect.max),
}
}
// ---------------------------------------------------------------------
/// Constrain the position of a window/area so it fits within the provided boundary.
///
/// If area is `None`, will constrain to [`Self::available_rect`].
pub(crate) fn constrain_window_rect_to_area(&self, window: Rect, area: Option<Rect>) -> Rect {
let mut area = area.unwrap_or_else(|| self.available_rect());
if window.width() > area.width() {
// Allow overlapping side bars.
// This is important for small screens, e.g. mobiles running the web demo.
area.max.x = self.input().screen_rect().max.x;
area.min.x = self.input().screen_rect().min.x;
}
if window.height() > area.height() {
// Allow overlapping top/bottom bars:
area.max.y = self.input().screen_rect().max.y;
area.min.y = self.input().screen_rect().min.y;
}
let mut pos = window.min;
// Constrain to screen, unless window is too large to fit:
let margin_x = (window.width() - area.width()).at_least(0.0);
let margin_y = (window.height() - area.height()).at_least(0.0);
pos.x = pos.x.at_most(area.right() + margin_x - window.width()); // move left if needed
pos.x = pos.x.at_least(area.left() - margin_x); // move right if needed
pos.y = pos.y.at_most(area.bottom() + margin_y - window.height()); // move right if needed
pos.y = pos.y.at_least(area.top() - margin_y); // move down if needed
pos = self.round_pos_to_pixels(pos);
Rect::from_min_size(pos, window.size())
}
// ---------------------------------------------------------------------
fn begin_frame_mut(&mut self, new_raw_input: RawInput) {
self.memory().begin_frame(&self.input, &new_raw_input);
let mut input = std::mem::take(&mut self.input);
if let Some(new_pixels_per_point) = self.memory().new_pixels_per_point.take() {
input.pixels_per_point = new_pixels_per_point;
}
self.input = input.begin_frame(new_raw_input);
self.frame_state.lock().begin_frame(&self.input);
{
// Load new fonts if required:
let new_font_definitions = self.memory().new_font_definitions.take();
let pixels_per_point = self.input.pixels_per_point();
| random_line_split |
||
context.rs |
}
impl std::borrow::Borrow<Context> for CtxRef {
fn borrow(&self) -> &Context {
self.0.borrow()
}
}
impl std::cmp::PartialEq for CtxRef {
fn eq(&self, other: &CtxRef) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl Default for CtxRef {
    fn default() -> Self {
        Self(Arc::new(Context {
            // Start with painting an extra frame to compensate for some widgets
            // that take two frames before they "settle":
            repaint_requests: AtomicU32::new(1),
            // All other fields use the derived `Context::default()`.
            ..Context::default()
        }))
    }
}
impl CtxRef {
    /// Call at the start of every frame. Match with a call to [`Context::end_frame`].
    ///
    /// This will modify the internal reference to point to a new generation of [`Context`].
    /// Any old clones of this [`CtxRef`] will refer to the old [`Context`], which will not get new input.
    ///
    /// Put your widgets into a [`SidePanel`], [`TopBottomPanel`], [`CentralPanel`], [`Window`] or [`Area`].
    pub fn begin_frame(&mut self, new_input: RawInput) {
        // Copy-on-write: clone the current `Context`, advance it one frame,
        // then swap in a fresh `Arc` so older clones keep seeing the old frame.
        let mut self_: Context = (*self.0).clone();
        self_.begin_frame_mut(new_input);
        *self = Self(Arc::new(self_));
    }
    // ---------------------------------------------------------------------
    /// If the given [`Id`] is not unique, an error will be printed at the given position.
    /// Call this for [`Id`]:s that need interaction or persistence.
    pub(crate) fn register_interaction_id(&self, id: Id, new_rect: Rect) {
        let prev_rect = self.frame_state().used_ids.insert(id, new_rect);
        if let Some(prev_rect) = prev_rect {
            // it is ok to reuse the same ID for e.g. a frame around a widget,
            // or to check for interaction with the same widget twice:
            if prev_rect.expand(0.1).contains_rect(new_rect)
                || new_rect.expand(0.1).contains_rect(prev_rect)
            {
                return;
            }
            // Paint a visible error at `pos`, plus a longer explanation when hovered.
            let show_error = |pos: Pos2, text: String| {
                let painter = self.debug_painter();
                let rect = painter.error(pos, text);
                if let Some(pointer_pos) = self.input.pointer.hover_pos() {
                    if rect.contains(pointer_pos) {
                        painter.error(
                            rect.left_bottom() + vec2(2.0, 4.0),
                            "ID clashes happens when things like Windows or CollapsingHeaders share names,\n\
                             or when things like ScrollAreas and Resize areas aren't given unique id_source:s.",
                        );
                    }
                }
            };
            let id_str = id.short_debug_format();
            // If the two rects are (almost) on top of each other, one message is enough:
            if prev_rect.min.distance(new_rect.min) < 4.0 {
                show_error(new_rect.min, format!("Double use of ID {}", id_str));
            } else {
                show_error(prev_rect.min, format!("First use of ID {}", id_str));
                show_error(new_rect.min, format!("Second use of ID {}", id_str));
            }
        }
    }
    // ---------------------------------------------------------------------
    /// Use `ui.interact` instead
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn interact(
        &self,
        clip_rect: Rect,
        item_spacing: Vec2,
        layer_id: LayerId,
        id: Id,
        rect: Rect,
        sense: Sense,
        enabled: bool,
    ) -> Response {
        let gap = 0.5; // Just to make sure we don't accidentally hover two things at once (a small eps should be sufficient).
        // Make it easier to click things: expand the hit-rect by up to half the
        // item spacing (clamped to [0, 5] points per axis).
        let interact_rect = rect.expand2(
            (0.5 * item_spacing - Vec2::splat(gap))
                .at_least(Vec2::splat(0.0))
                .at_most(Vec2::splat(5.0)),
        ); // make it easier to click
        let hovered = self.rect_contains_pointer(layer_id, clip_rect.intersect(interact_rect));
        self.interact_with_hovered(layer_id, id, rect, sense, enabled, hovered)
    }
    /// You specify if a thing is hovered, and the function gives a `Response`.
    ///
    /// This is the core of widget interaction: it fills in clicks, drags and
    /// focus handling on the returned [`Response`] based on this frame's
    /// pointer/keyboard events and the interaction state in [`Memory`].
    pub(crate) fn interact_with_hovered(
        &self,
        layer_id: LayerId,
        id: Id,
        rect: Rect,
        sense: Sense,
        enabled: bool,
        hovered: bool,
    ) -> Response {
        let hovered = hovered && enabled; // can't even hover disabled widgets
        let mut response = Response {
            ctx: self.clone(),
            layer_id,
            id,
            rect,
            sense,
            enabled,
            hovered,
            clicked: Default::default(),
            double_clicked: Default::default(),
            dragged: false,
            drag_released: false,
            is_pointer_button_down_on: false,
            interact_pointer_pos: None,
            changed: false, // must be set by the widget itself
        };
        if !enabled || !sense.focusable || !layer_id.allow_interaction() {
            // Not interested or allowed input:
            self.memory().surrender_focus(id);
            return response;
        }
        // We only want to focus labels if the screen reader is on.
        let interested_in_focus =
            sense.interactive() || sense.focusable && self.memory().options.screen_reader;
        if interested_in_focus {
            self.memory().interested_in_focus(id);
        }
        if sense.click
            && response.has_focus()
            && (self.input().key_pressed(Key::Space) || self.input().key_pressed(Key::Enter))
        {
            // Space/enter works like a primary click for e.g. selected buttons
            response.clicked[PointerButton::Primary as usize] = true;
        }
        self.register_interaction_id(id, rect);
        if sense.click || sense.drag {
            // NOTE: this guard is held across the whole event loop below.
            let mut memory = self.memory();
            memory.interaction.click_interest |= hovered && sense.click;
            memory.interaction.drag_interest |= hovered && sense.drag;
            // Are we already mid-click/mid-drag on this widget from a previous frame?
            response.dragged = memory.interaction.drag_id == Some(id);
            response.is_pointer_button_down_on =
                memory.interaction.click_id == Some(id) || response.dragged;
            for pointer_event in &self.input.pointer.pointer_events {
                match pointer_event {
                    PointerEvent::Moved(_) => {}
                    PointerEvent::Pressed(_) => {
                        if hovered {
                            if sense.click && memory.interaction.click_id.is_none() {
                                // potential start of a click
                                memory.interaction.click_id = Some(id);
                                response.is_pointer_button_down_on = true;
                            }
                            // HACK: windows have low priority on dragging.
                            // This is so that if you drag a slider in a window,
                            // the slider will steal the drag away from the window.
                            // This is needed because we do window interaction first (to prevent frame delay),
                            // and then do content layout.
                            if sense.drag
                                && (memory.interaction.drag_id.is_none()
                                    || memory.interaction.drag_is_window)
                            {
                                // potential start of a drag
                                memory.interaction.drag_id = Some(id);
                                memory.interaction.drag_is_window = false;
                                memory.window_interaction = None; // HACK: stop moving windows (if any)
                                response.is_pointer_button_down_on = true;
                                response.dragged = true;
                            }
                        }
                    }
                    PointerEvent::Released(click) => {
                        response.drag_released = response.dragged;
                        response.dragged = false;
                        // A click only counts if the press started on this widget too:
                        if hovered && response.is_pointer_button_down_on {
                            if let Some(click) = click {
                                let clicked = hovered && response.is_pointer_button_down_on;
                                response.clicked[click.button as usize] = clicked;
                                response.double_clicked[click.button as usize] =
                                    clicked && click.is_double();
                            }
                        }
                    }
                }
            }
        }
        if response.is_pointer_button_down_on {
            response.interact_pointer_pos = self.input().pointer.interact_pos();
        }
        if self.input.pointer.any_down() {
            response.hovered &= response.is_pointer_button_down_on; // we don't hover widgets while interacting with *other* widgets
        }
        if response.has_focus() && response.clicked_elsewhere() {
            self.memory().surrender_focus(id);
        }
        response
    }
    /// Get a full-screen painter for a new or existing layer
    pub fn layer_painter(&self, layer_id: LayerId) -> Painter {
        Painter::new(self.clone(), layer_id, self.input.screen_rect())
    }
    /// Paint on top of everything else
    pub fn debug_painter(&self) -> Painter {
        Self::layer_painter(self, LayerId::debug())
    }
}
// ----------------------------------------------------------------------------
/// This is the first thing you need when working with egui. Create using [`CtxRef`].
///
/// Contains the [`InputState`], [`Memory`], [`Output`], and more.
///
/// Your handle to Egui.
///
/// Almost all methods are marked `&self`, `Context` has interior mutability (protected by mutexes).
/// Multi-threaded access to a [`Context`] is behind the feature flag `multi_threaded`.
/// Normally you'd always do all ui work on one thread, or perhaps use multiple contexts,
/// but if you really want to access the same Context from multiple threads, it *SHOULD* be fine,
/// but you are likely the first person to try it.
#[derive(Default)]
pub struct Context {
    // We clone the Context each frame so we can set a new `input`.
    // This is so we can avoid a mutex lock to access the `InputState`.
    // This means everything else needs to be behind an Arc.
    // We can probably come up with a nicer design.
    //
    /// None until first call to `begin_frame`.
    fonts: Option<Arc<Fonts>>,
    /// All persistent egui state (widget state, window positions, options, …).
    memory: Arc<Mutex<Memory>>,
    /// Drives value animations across frames.
    animation_manager: Arc<Mutex<AnimationManager>>,
    /// Input for the current frame. Not behind a lock (see comment above).
    input: InputState,
    /// State that is collected during a frame and then cleared
    frame_state: Arc<Mutex<FrameState>>,
    // The output of a frame:
    graphics: Arc<Mutex<GraphicLayers>>,
    output: Arc<Mutex<Output>>,
    paint_stats: Arc<Mutex<PaintStats>>,
    /// While positive, keep requesting repaints. Decrement at the end of each frame.
    repaint_requests: AtomicU32,
}
impl Clone for Context {
fn clone(&self) -> Self {
Context {
fonts: self.fonts.clone(),
memory: self.memory.clone(),
animation_manager: self.animation_manager.clone(),
input: self.input.clone(),
frame_state: self.frame_state.clone(),
graphics: self.graphics.clone(),
output: self.output.clone(),
paint_stats: self.paint_stats.clone(),
repaint_requests: self.repaint_requests.load(SeqCst).into(),
}
}
}
impl Context {
/// How much space is still available after panels has been added.
/// This is the "background" area, what egui doesn't cover with panels (but may cover with windows).
/// This is also the area to which windows are constrained.
pub fn available_rect(&self) -> Rect {
    // Delegates to the per-frame state, which shrinks as panels are added.
    self.frame_state.lock().available_rect()
}
/// Stores all the egui state.
/// If you want to store/restore egui, serialize this.
pub fn memory(&self) -> MutexGuard<'_, Memory> {
    // NOTE: the returned guard holds the lock until dropped — keep its scope short.
    self.memory.lock()
}
/// The paint output collected so far this frame, grouped by layer.
pub(crate) fn graphics(&self) -> MutexGuard<'_, GraphicLayers> {
    self.graphics.lock()
}
/// What egui outputs each frame.
pub fn output(&self) -> MutexGuard<'_, Output> {
    // NOTE: the returned guard holds the lock until dropped.
    self.output.lock()
}
/// State that is collected during a frame and then cleared.
pub(crate) fn frame_state(&self) -> MutexGuard<'_, FrameState> {
    self.frame_state.lock()
}
/// Call this if there is need to repaint the UI, i.e. if you are showing an animation.
/// If this is called at least once in a frame, then there will be another frame right after this.
/// Call as many times as you wish, only one repaint will be issued.
pub fn request_repaint(&self) {
    // Request two frames of repaint, just to cover some corner cases (frame delays):
    const TIMES_TO_REPAINT: u32 = 2;
    self.repaint_requests.store(TIMES_TO_REPAINT, SeqCst);
}
/// The input state for the current frame.
/// Borrowed directly from a plain field — no lock needed (see `Context` docs).
#[inline(always)]
pub fn input(&self) -> &InputState {
    &self.input
}
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
///
/// # Panics
/// Panics if called before the first [`CtxRef::begin_frame()`].
pub fn fonts(&self) -> &Fonts {
    &*self
        .fonts
        .as_ref()
        .expect("No fonts available until first call to CtxRef::begin_frame()")
}
/// The egui texture, containing font characters etc.
/// Not valid until first call to [`CtxRef::begin_frame()`].
/// That's because since we don't know the proper `pixels_per_point` until then.
///
/// # Panics
/// Panics if called before the first [`CtxRef::begin_frame()`] (via [`Self::fonts`]).
pub fn texture(&self) -> Arc<epaint::Texture> {
    self.fonts().texture()
}
/// Tell `egui` which fonts to use.
///
/// The default `egui` fonts only support latin and cyrillic alphabets,
/// but you can call this to install additional fonts that support e.g. korean characters.
///
/// The new fonts will become active at the start of the next frame.
pub fn set_fonts(&self, font_definitions: FontDefinitions) {
    // NOTE: this comparison is expensive since it checks TTF data for equality.
    let unchanged = self
        .fonts
        .as_ref()
        .map_or(false, |current| current.definitions() == &font_definitions);
    if unchanged {
        return; // no change - save us from reloading font textures
    }
    self.memory().new_font_definitions = Some(font_definitions);
}
/// The [`Style`] used by all subsequent windows, panels etc.
pub fn style(&self) -> Arc<Style> {
    // Cheap: clones the `Arc`, not the `Style` itself.
    self.memory().options.style.clone()
}
/// The [`Style`] used by all new windows, panels etc.
///
/// You can also use [`Ui::style_mut`] to change the style of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// let mut style: egui::Style = (*ctx.style()).clone();
/// style.spacing.item_spacing = egui::vec2(10.0, 20.0);
/// ctx.set_style(style);
/// ```
pub fn set_style(&self, style: impl Into<Arc<Style>>) {
    self.memory().options.style = style.into();
}
/// The [`Visuals`] used by all subsequent windows, panels etc.
///
/// You can also use [`Ui::visuals_mut`] to change the visuals of a single [`Ui`].
///
/// Example:
/// ```
/// # let mut ctx = egui::CtxRef::default();
/// ctx.set_visuals(egui::Visuals::light()); // Switch to light mode
/// ```
pub fn set_visuals(&self, visuals: crate::Visuals) {
    // `Arc::make_mut` is copy-on-write: it clones the shared `Style` only if
    // other references to it exist, then lets us mutate in place.
    std::sync::Arc::make_mut(&mut self.memory().options.style).visuals = visuals;
}
/// The number of physical pixels for each logical point.
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
    self.input.pixels_per_point()
}
/// Set the number of physical pixels for each logical point.
/// Will become active at the start of the next frame.
///
/// Note that this may be overwritten by input from the integration via [`RawInput::pixels_per_point`].
/// For instance, when using `egui_web` the browsers native zoom level will always be used.
pub fn set_pixels_per_point(&self, pixels_per_point: f32) {
    // Deferred: stored in memory and applied in the next `begin_frame`.
    self.memory().new_pixels_per_point = Some(pixels_per_point);
}
/// Useful for pixel-perfect rendering.
///
/// Snaps a logical-point coordinate to the physical pixel grid:
/// convert to pixels, round, convert back.
pub(crate) fn round_to_pixel(&self, point: f32) -> f32 {
    let ppp = self.pixels_per_point();
    (point * ppp).round() / ppp
}
/// Useful for pixel-perfect rendering.
///
/// Snaps both coordinates of a position to the physical pixel grid.
pub(crate) fn round_pos_to_pixels(&self, pos: Pos2) -> Pos2 {
    let x = self.round_to_pixel(pos.x);
    let y = self.round_to_pixel(pos.y);
    pos2(x, y)
}
/// Useful for pixel-perfect rendering.
/// Snaps both components of a vector to the physical pixel grid.
pub(crate) fn round_vec_to_pixels(&self, vec: Vec2) -> Vec2 {
    vec2(self.round_to_pixel(vec.x), self.round_to_pixel(vec.y))
}
/// Useful for pixel-perfect rendering.
/// Snaps both corners of a rectangle to the physical pixel grid.
pub(crate) fn round_rect_to_pixels(&self, rect: Rect) -> Rect {
    Rect {
        min: self.round_pos_to_pixels(rect.min),
        max: self.round_pos_to_pixels(rect.max),
    }
}
// ---------------------------------------------------------------------
/// Constrain the position of a window/area so it fits within the provided boundary.
///
/// If area is `None`, will constrain to [`Self::available_rect`].
pub(crate) fn constrain_window_rect_to_area(&self, window: Rect, area: Option<Rect>) -> Rect {
    let mut area = area.unwrap_or_else(|| self.available_rect());
    if window.width() > area.width() {
        // Allow overlapping side bars.
        // This is important for small screens, e.g. mobiles running the web demo.
        area.max.x = self.input().screen_rect().max.x;
        area.min.x = self.input().screen_rect().min.x;
    }
    if window.height() > area.height() {
        // Allow overlapping top/bottom bars:
        area.max.y = self.input().screen_rect().max.y;
        area.min.y = self.input().screen_rect().min.y;
    }
    let mut pos = window.min;
    // Constrain to screen, unless window is too large to fit:
    let margin_x = (window.width() - area.width()).at_least(0.0);
    let margin_y = (window.height() - area.height()).at_least(0.0);
    pos.x = pos.x.at_most(area.right() + margin_x - window.width()); // move left if needed
    pos.x = pos.x.at_least(area.left() - margin_x); // move right if needed
    pos.y = pos.y.at_most(area.bottom() + margin_y - window.height()); // move up if needed
    pos.y = pos.y.at_least(area.top() - margin_y); // move down if needed
    pos = self.round_pos_to_pixels(pos);
    Rect::from_min_size(pos, window.size())
}
// ---------------------------------------------------------------------
fn begin_frame_mut(&mut self, new_raw_input: RawInput) {
self.memory().begin_frame(&self.input, &new_raw_input);
let mut input = std::mem::take(&mut self.input);
if let Some(new_pixels_per_point) = self.memory().new_pixels_per_point.take() {
input.pixels_per_point = new_pixels_per_point;
}
| {
self.0.as_ref()
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.