file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
main.rs | // Copyright 2020 Xavier Gillard
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//! This example shows how to implement a solver for the maximum independent set problem
//! using ddo. It is a fairly simple example but it features most of the aspects you will
//! want to copy when implementing your own solver.
use std::{cell::RefCell, path::Path, fs::File, io::{BufReader, BufRead}, num::ParseIntError, time::{Duration, Instant}};
use bit_set::BitSet;
use clap::Parser;
use ddo::*;
use regex::Regex;
#[cfg(test)]
mod tests;
/// This structure represents an instance of the Maximum Independent Set Problem.
/// It is this structure that implements a simple dynamic programming model for the
/// MISP. In that model, the state is simply a bitset where each bit represents
/// a node that may be kept or left out of the MIS.
pub struct Misp {
/// The number of variables in the problem instance
nb_vars: usize,
/// For each vertex 'i' of the original graph, the field 'neighbors[i]' contains
/// a bitmask representing the COMPLEMENT of the adjacency list of i in the
/// original graph. While this may seem a complicated take on the representation
/// of this problem instance, using the complement is helpful: it lets us remove
/// all the neighbors of a vertex from a state very efficiently.
neighbors: Vec<BitSet>,
/// For each vertex 'i', the value of 'weight[i]' denotes the weight associated
/// with vertex i in the problem instance. The goal of the MISP is to select nodes
/// from the underlying graph such that the resulting set is an independent set
/// whose total selected weight is maximal.
weight: Vec<isize>,
}
/// A constant meaning the node is taken into the independent set.
const YES: isize = 1;
/// A constant meaning the node is left out of the independent set.
const NO: isize = 0;
/// The Misp structure implements the 'Problem' trait. This means Misp is the definition
/// of the DP model. That DP model is pretty straightforward; still, you might want
/// to check the implementation of the branching heuristic (next_variable method)
/// since it does some interesting things.
impl Problem for Misp {
type State = BitSet;
fn nb_variables(&self) -> usize {
self.nb_vars
}
fn initial_state(&self) -> Self::State {
(0..self.nb_variables()).collect()
}
fn initial_value(&self) -> isize {
0
}
fn transition(&self, state: &Self::State, decision: Decision) -> Self::State {
let mut res = state.clone();
res.remove(decision.variable.id());
if decision.value == YES {
// intersect with complement of the neighbors for fast set difference
res.intersect_with(&self.neighbors[decision.variable.id()]);
}
res
}
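    // Note on the transition above (added remark): because the COMPLEMENT of each adjacency
    // list is stored, "drop all neighbors of the selected vertex" is a single bitwise AND,
    // since A \ N(v) = A ∩ complement(N(v)).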
fn transition_cost(&self, _: &Self::State, decision: Decision) -> isize {
if decision.value == NO {
0
} else {
self.weight[decision.variable.id()]
}
}
fn | (&self, variable: Variable, state: &Self::State, f: &mut dyn DecisionCallback) {
if state.contains(variable.id()) {
f.apply(Decision{variable, value: YES});
f.apply(Decision{variable, value: NO });
} else {
f.apply(Decision{variable, value: NO });
}
}
/// This method is (apparently) a bit more hairy. It simply decides to branch on
/// the variable that occurs in the fewest states of the next layer. The intuition
/// here is to limit the maximum width as much as possible when developing the layers, since all
/// nodes that are not impacted by the change on the selected vertex are simply copied over to the
/// next layer.
fn next_variable(&self, _: usize, next_layer: &mut dyn Iterator<Item = &Self::State>) -> Option<Variable> {
// The thread-local block is possibly one of the most surprising bits of this code. It declares
// a static variable called VAR_HEURISTIC storing the counts of each vertex in the next layer.
// The fact that it is static means that it will not be re-created (re-allocated) upon each
// pass. The fact that it is declared within a thread_local block means that this static var
// will be created with potentially mutable access for each thread.
thread_local! {
static VAR_HEURISTIC: RefCell<Vec<usize>> = RefCell::new(vec![]);
}
VAR_HEURISTIC.with(|heu| {
let mut heu = heu.borrow_mut();
let heu: &mut Vec<usize> = heu.as_mut();
// initialize
heu.reserve_exact(self.nb_variables());
if heu.is_empty() {
for _ in 0..self.nb_variables() { heu.push(0); }
} else {
heu.iter_mut().for_each(|i| *i = 0);
}
// count the occurrence of each var
for s in next_layer {
for sit in s.iter() {
heu[sit] += 1;
}
}
// take the one occurring the least often
heu.iter().copied().enumerate()
.filter(|(_, v)| *v > 0)
.min_by_key(|(_, v)| *v)
.map(|(x, _)| Variable(x))
})
}
fn is_impacted_by(&self, var: Variable, state: &Self::State) -> bool {
state.contains(var.id())
}
}
/// In addition to a dynamic programming (DP) model of the problem you want to solve,
/// the branch and bound with MDD algorithm (and thus ddo) requires that you provide
/// an additional relaxation allowing you to control the maximum amount of space used by
/// the decision diagrams that are compiled.
///
/// That relaxation requires two operations: one to merge several nodes into one
/// merged node that acts as an over-approximation of the other nodes, and a second
/// operation used to possibly offset some weight that would otherwise be lost
/// on the arcs entering the newly created merged node.
///
/// The role of this very simple structure is to provide an implementation
/// of that relaxation.
///
/// # Note:
/// In addition to the aforementioned two operations, the MispRelax structure implements
/// an optional `fast_upper_bound` method, which provides a useful bound to
/// prune portions of the state space as the decision diagrams are compiled
/// (a.k.a. rough upper bound pruning).
pub struct MispRelax<'a>{pb: &'a Misp}
impl Relaxation for MispRelax<'_> {
type State = BitSet;
fn merge(&self, states: &mut dyn Iterator<Item = &Self::State>) -> Self::State {
let mut state = BitSet::with_capacity(self.pb.nb_variables());
for s in states {
state.union_with(s);
}
state
}
fn relax(
&self,
_source: &Self::State,
_dest: &Self::State,
_new: &Self::State,
_decision: Decision,
cost: isize,
) -> isize {
cost
}
fn fast_upper_bound(&self, state: &Self::State) -> isize {
state.iter().map(|x| self.pb.weight[x]).sum()
}
}
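// Remark on `fast_upper_bound` (added): summing the weights of all vertices still present
// in a state is a valid upper bound, because any independent set drawn from that state is a
// subset of those vertices; it is cheap to compute but deliberately loose.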
/// The last bit of information which we need to provide when implementing a ddo-based
/// solver is a `StateRanking`. This is a heuristic which is used to select the most
/// and least promising nodes as a means to only delete/merge the *least* promising nodes
/// when compiling restricted and relaxed DDs.
pub struct MispRanking;
impl StateRanking for MispRanking {
type State = BitSet;
fn compare(&self, a: &Self::State, b: &Self::State) -> std::cmp::Ordering {
a.len().cmp(&b.len())
.then_with(|| a.cmp(b))
}
}
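// Remark (added, hedged reading): states are compared by cardinality first, so a state with
// more candidate vertices left is ranked as more promising; ties are broken by the
// lexicographic order of the bitsets so that the ranking is total.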
// #########################################################################################
// # THE INFORMATION BEYOND THIS LINE IS NOT DIRECTLY RELATED TO THE IMPLEMENTATION OF #
// # A SOLVER BASED ON DDO. INSTEAD, THAT PORTION OF THE CODE CONTAINS GENERIC FUNCTION #
// # THAT ARE USED TO READ AN INSTANCE FROM FILE, PROCESS COMMAND LINE ARGUMENTS, AND #
// # THE MAIN FUNCTION. THESE ARE THUS NOT REQUIRED 'PER-SE', BUT I BELIEVE IT IS USEFUL #
// # TO SHOW HOW IT CAN BE DONE IN AN EXAMPLE. #
// #########################################################################################
/// This structure uses `clap-derive` annotations and defines the arguments that can
/// be passed to the executable solver.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the instance file
fname: String,
/// The number of concurrent threads
#[clap(short, long, default_value = "8")]
threads: usize,
/// The maximum amount of time you would like this solver to run
#[clap(short, long)]
duration: Option<u64>,
/// The maximum number of nodes per layer
#[clap(short, long)]
width: Option<usize>,
}
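// Example invocation (added; hypothetical binary and instance names, flags as defined above):
//   misp instance.clq --threads 8 --duration 60 --width 1000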
/// This enumeration simply groups the kinds of errors that might occur when parsing a
/// misp instance from a file. There can be io errors (file unavailable?), format errors
/// (e.g. the file is not an instance but contains the text of your next paper),
/// or parse int errors (which are actually a variant of the format error since they tell
/// you that the parser expected an integer number but got ... something else).
#[derive(Debug, thiserror::Error)]
enum Error {
/// There was an io related error
#[error("io error {0}")]
Io(#[from] std::io::Error),
/// The parser expected to read something that was an integer but got some garbage
#[error("parse int {0}")]
ParseInt(#[from] ParseIntError),
/// The file was not properly formatted.
#[error("ill formed instance")]
Format,
}
/// This function is used to read a misp instance from a file. It returns either a
/// misp instance if everything went well or an error describing the problem.
fn read_instance<P: AsRef<Path>>(fname: P) -> Result<Misp, Error> {
let f = File::open(fname)?;
let f = BufReader::new(f);
let comment = Regex::new(r"^c\s.*$").unwrap();
let pb_decl = Regex::new(r"^p\s+edge\s+(?P<vars>\d+)\s+(?P<edges>\d+)$").unwrap();
let node_decl = Regex::new(r"^n\s+(?P<node>\d+)\s+(?P<weight>-?\d+)").unwrap();
let edge_decl = Regex::new(r"^e\s+(?P<src>\d+)\s+(?P<dst>\d+)").unwrap();
let mut g = Misp{nb_vars: 0, neighbors: vec![], weight: vec![]};
for line in f.lines() {
let line = line?;
let line = line.trim();
if line.is_empty() {
continue;
}
if comment.is_match(line) {
continue;
}
if let Some(caps) = pb_decl.captures(line) {
let n = caps["vars"].to_string().parse::<usize>()?;
let full = (0..n).collect();
g.nb_vars = n;
g.neighbors = vec![full; n];
g.weight = vec![1; n];
continue;
}
if let Some(caps) = node_decl.captures(line) {
let n = caps["node"].to_string().parse::<usize>()?;
let w = caps["weight"].to_string().parse::<isize>()?;
let n = n - 1;
g.weight[n] = w;
continue;
}
if let Some(caps) = edge_decl.captures(line) {
let src = caps["src"].to_string().parse::<usize>()?;
let dst = caps["dst"].to_string().parse::<usize>()?;
let src = src-1;
let dst = dst-1;
g.neighbors[src].remove(dst);
g.neighbors[dst].remove(src);
continue;
}
// skip
return Err(Error::Format)
}
Ok(g)
}
/// A utility function returning a max-width heuristic that can either be a fixed-width
/// policy (if w is given) or an adaptive policy returning the number of unassigned variables
/// in the overall problem.
fn max_width<P: Problem>(p: &P, w: Option<usize>) -> Box<dyn WidthHeuristic<P::State> + Send + Sync> {
if let Some(w) = w {
Box::new(FixedWidth(w))
} else {
Box::new(NbUnassignedWidth(p.nb_variables()))
}
}
/// A utility function returning a cutoff heuristic that can either be a time-budget policy
/// (if a timeout is given) or no cutoff at all.
fn cutoff(timeout: Option<u64>) -> Box<dyn Cutoff + Send + Sync> {
if let Some(t) = timeout {
Box::new(TimeBudget::new(Duration::from_secs(t)))
} else {
Box::new(NoCutoff)
}
}
/// This is your executable's entry point. It is the place where all the pieces are put together
/// to create a fast and effective solver for the misp problem.
fn main() {
let args = Args::parse();
let fname = &args.fname;
let problem = read_instance(fname).unwrap();
let relaxation = MispRelax {pb: &problem};
let ranking = MispRanking;
let width = max_width(&problem, args.width);
let dominance = EmptyDominanceChecker::default();
let cutoff = cutoff(args.duration);
let mut fringe = NoDupFringe::new(MaxUB::new(&ranking));
// This solver compiles DDs that allow the definition of long arcs spanning several layers.
let mut solver = ParNoBarrierSolverLel::custom(
&problem,
&relaxation,
&ranking,
width.as_ref(),
&dominance,
cutoff.as_ref(),
&mut fringe,
args.threads,
);
let start = Instant::now();
let Completion{ is_exact, best_value } = solver.maximize();
let duration = start.elapsed();
let upper_bound = solver.best_upper_bound();
let lower_bound = solver.best_lower_bound();
let gap = solver.gap();
let best_solution: Option<Vec<_>> = solver.best_solution().map(|mut decisions|{
decisions.sort_unstable_by_key(|d| d.variable.id());
decisions.iter()
.filter(|d| d.value == 1)
.map(|d| d.variable.id())
.collect()
});
// check solution
if let Some(bs) = best_solution.as_ref() {
for (i, a) in bs.iter().copied().enumerate() {
for b in bs.iter().copied().skip(i+1) {
if !problem.neighbors[a].contains(b) {
println!("not a solution ! {a} -- {b}");
}
}
}
}
println!("Duration: {:.3} seconds", duration.as_secs_f32());
println!("Objective: {}", best_value.unwrap_or(-1));
println!("Upper Bnd: {}", upper_bound);
println!("Lower Bnd: {}", lower_bound);
println!("Gap: {:.3}", gap);
println!("Aborted: {}", !is_exact);
println!("Solution: {:?}", best_solution.unwrap_or_default());
}
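// A minimal sanity check (added sketch, not part of the original example). It exercises the
// DP transition on a 3-vertex triangle to illustrate the complement-of-neighbors trick.
#[cfg(test)]
mod doc_example {
    use super::{Misp, YES};
    use bit_set::BitSet;
    use ddo::{Decision, Problem, Variable};

    #[test]
    fn taking_a_vertex_clears_its_neighbors() {
        // Triangle on vertices {0, 1, 2}: every vertex is adjacent to the two others,
        // so the complemented adjacency list of vertex i contains only i itself.
        let neighbors: Vec<BitSet> = (0..3usize)
            .map(|i| std::iter::once(i).collect())
            .collect();
        let misp = Misp { nb_vars: 3, neighbors, weight: vec![1, 1, 1] };
        let root: BitSet = (0..3usize).collect();
        // Taking vertex 0 removes it and, through the intersection with the complement
        // of its adjacency list, removes its neighbors 1 and 2 as well.
        let next = misp.transition(&root, Decision { variable: Variable(0), value: YES });
        assert!(next.is_empty());
    }
}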
| for_each_in_domain | identifier_name |
main.rs | use clap::Clap;
use itertools::Itertools;
use num::integer::gcd;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
static MAKE_REPORT: AtomicBool = AtomicBool::new(false);
macro_rules! write_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
print!($($args)*);
}
}
);
macro_rules! writeln_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
println!($($args)*);
}
}
);
fn has_square_factor(d: i64) -> Result<(), i64> {
let mut d = d.abs();
assert_ne!(d, 0);
let mut i = 2;
while d != 1 {
if d % i == 0 {
d /= i;
if d % i == 0 {
return Err(i);
}
}
i += 1;
}
Ok(())
}
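// Examples (added): has_square_factor(12) == Err(2) because 12 = 2^2 * 3, while
// has_square_factor(30) == Ok(()) because 30 = 2 * 3 * 5 is squarefree.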
fn discriminant(d: i64) -> i64 {
let mod4 = (d % 4 + 4) % 4;
writeln_report!(
r"${} \equiv {} \mod 4$であるから,判別式を$D$とすると",
d,
mod4
);
// 負の時の対策
let disc = if mod4 == 1 { d } else { 4 * d };
writeln_report!(r"$D = {}$となる.", disc);
disc
}
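// Examples (added): for d = -7, (-7 % 4 + 4) % 4 == 1, so D = d = -7;
// for d = -5 the residue is 3, so D = 4 * (-5) = -20.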
fn is_int(x: f64) -> bool {
(x - x.round()).abs() < 1e-8
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_negative(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
// b の範囲を求める (exclusive)
let maxb = {
let sqrt = (disc.abs() as f64 / 3.0).sqrt();
let is_int = is_int(sqrt);
// 1.3 -> 2, 2.9 -> 3, 4.0 -> 5 としたい。
//
// sqrt.floor() + 1.0 でもよいが、 sqrt の精度で整数がわずかに小さい値に
// なって floor で 1 ずれる可能性を心配している。
let maxb = if is_int {
sqrt.round() + 1.0
} else {
sqrt.ceil()
} as i64;
writeln_report!(
r"\[ |b| \leqq \sqrt{{ \frac{{ |{disc}| }}{{ 3 }} }} = \sqrt{{ \frac{{ |{discabs}| }}{{ 3 }} }} {op} {maxb}. \]",
disc = disc,
discabs = disc.abs(),
op = if is_int { r"=" } else { "<" },
maxb = if is_int { maxb - 1 } else { maxb },
);
maxb
};
writeln_report!(
r"$4ac = b^2 + {}$より$b$は{}であるから,",
disc.abs(),
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = (0..maxb)
.filter(|x| x % 2 == disc.abs() % 2)
.flat_map(|x| vec![x, -x])
.dedup()
.collect_vec();
{
let nonzero = bs.iter().filter(|&&x| x > 0);
let has_zero = bs[0] == 0;
if bs.is_empty() {
writeln_report!(r"条件を満たす$b$はない.",);
return Err("no cands; is d = 1?".to_string());
}
if bs.len() == 1 {
writeln_report!(r"条件を満たす$b$は$b = 0$.",);
} else {
writeln_report!(
r"条件を満たす$b$は$b = {}$\pm {}$.", | }
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// 条件を満たす a, c を求める.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) をチェックする
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// 条件 (A) を確認する
// b の候補を得る (exclusive))
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// 本来は d = 1 以外で int になることはないのであまり考える必要はない。
let is_int = is_int(sqrt);
// 1.3 -> -2, -2.9 -> -3, 4.0 -> -4 としたい。
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// a, c を求める
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) を確認する
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if !left { r"\not" } else { "" };
let leftend = if left {
"を満たす."
} else {
"となるので不適."
};
let right = a - b + c > 0;
let rightopnot = if !right { r"\not" } else { "" };
let rightstart = if left && right {
"また"
} else {
"このとき"
};
let rightend = if right {
"を満たす."
} else {
"となるので不適."
};
if !left || (left && right) {
writeln_report!(
r"このとき$a + b + c = {} {:+} {:+} = {} {}< 0${}",
a,
b,
c,
a + b + c,
leftopnot,
leftend
);
}
if !right || (left && right) {
writeln_report!(
r"{}$a - b + c = {} {:+} {:+} = {} {}> 0${}",
rightstart,
a,
-b,
c,
a - b + c,
rightopnot,
rightend
);
}
left && right
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
// 条件 (C) を確認する
let res = remove_same_repeat(disc, &res);
writeln_report!();
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
fn remove_same_repeat(disc: i64, cands: &[(i64, i64, i64)]) -> Vec<(i64, i64, i64)> {
writeln_report!("");
writeln_report!("ここまでで得られた$(a, b, c)$の組は,");
writeln_report!(r"${:?}$.", cands.iter().format("$, $"));
writeln_report!(r"これを連分数展開し,循環節が同じものを除く.");
writeln_report!(r"連分数展開の途中に現れた分数を全て除けば良い.");
let cand_fracs = cands
.iter()
.map(|&(a, b, _)| Frac::from_abd(a, b, disc))
.collect_vec();
let map: HashMap<_, _> = cand_fracs
.iter()
.copied()
.zip(cands.iter().copied())
.collect();
let mut notfound: HashSet<_> = map.keys().collect();
let mut res = Vec::new();
for mut frac in cand_fracs {
if !notfound.contains(&frac) {
continue;
}
writeln_report!();
writeln_report!("${:?}$に対応する${}$を連分数展開する.", map[&frac], frac);
res.push(map[&frac]);
notfound.remove(&frac);
let mut obtained = HashSet::new();
while obtained.insert(frac) && !notfound.is_empty() {
write_report!(r"\[ {} = ", frac);
let int = frac.integer_part();
frac = frac.sub_int(int);
write_report!(r"{} + \left({}\right) = ", int, frac);
frac = frac.invert();
writeln_report!(r"{} + \frac{{ 1 }}{{ {} }}. \]", int, frac);
if notfound.contains(&frac) {
writeln_report!(
"${}$は${:?}$に対応するので,${:?}$は除く.",
frac,
map[&frac],
map[&frac]
);
notfound.remove(&frac);
}
}
if !notfound.is_empty() && obtained.contains(&frac) {
writeln_report!(
"ここで${}$は一度現れたので,この連分数はここから循環する.",
frac
);
}
}
res
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
struct Frac {
num: i64,
coeff: i64,
root: i64,
denom: i64,
}
impl fmt::Display for Frac {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
let coeff = if self.coeff == 1 {
"+".to_string()
} else if self.coeff == -1 {
"-".to_string()
} else {
format!("{:+}", self.coeff)
};
let num = format!(r"{} {}\sqrt{{ {} }}", self.num, coeff, self.root);
let frac = if self.denom == 1 {
num
} else {
format!(r"\frac{{ {} }}{{ {} }}", num, self.denom)
};
write!(b, "{}", frac)
}
}
impl Frac {
pub fn from_abd(a: i64, b: i64, disc: i64) -> Frac {
Frac::new(-b, 1, disc, 2 * a)
}
pub fn new(num: i64, coeff: i64, root: i64, denom: i64) -> Frac {
assert!(root > 0);
let mut f = Frac {
num,
coeff,
root,
denom,
};
f.normalize();
f
}
pub fn normalize(&mut self) {
self.normalize_root();
self.reduce();
if self.denom < 0 {
self.denom *= -1;
self.num *= -1;
self.coeff *= -1;
}
}
pub fn invert(self) -> Frac {
let denom = self.num * self.num - self.coeff * self.coeff * self.root;
let num = self.denom * self.num;
let coeff = -self.denom * self.coeff;
let root = self.root;
let mut res = Frac {
denom,
num,
coeff,
root,
};
res.normalize();
res
}
pub fn integer_part(self) -> i64 {
let num = self.num as f64 + self.coeff as f64 * (self.root as f64).sqrt();
let denom = self.denom as f64;
let float = num / denom;
if is_int(float) {
float.round() as i64
} else {
float.floor() as i64
}
}
pub fn sub_int(mut self, int: i64) -> Frac {
self.num -= int * self.denom;
self.normalize();
self
}
fn normalize_root(&mut self) {
while let Err(d) = has_square_factor(self.root) {
self.root /= d * d;
self.coeff *= d;
}
}
fn reduce(&mut self) {
let g = gcd(gcd(self.num, self.coeff), self.denom);
self.num /= g;
self.coeff /= g;
self.denom /= g;
}
}
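// Worked example (added): Frac::from_abd(1, -1, 5) represents (1 + sqrt(5)) / 2. Its integer
// part is 1, sub_int(1) leaves (-1 + sqrt(5)) / 2, and invert() maps that back to
// (1 + sqrt(5)) / 2, so the continued fraction of the golden ratio is [1; 1, 1, ...].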
#[allow(clippy::collapsible_if)]
fn do_main(d: i64) -> Result<(), String> {
// if d.abs() > 999 {
// return Err(format!("input too large: {}", d));
// }
if d == 0 {
writeln_report!("$d = 0$ のときは考えない.");
return Err("d is zero".to_string());
}
if let Err(f) = has_square_factor(d) {
writeln_report!("$d = {}$は平方因子${}$を持つため,考えない.", d, f);
return Err(format!("{} has square factor: {}", d, f));
}
writeln_report!(r"このとき$d = {}$である.", d);
let disc = discriminant(d);
writeln_report!();
let res = if d < 0 {
calc_negative(disc)?
} else {
calc_positive(disc)?
};
if !MAKE_REPORT.load(Ordering::SeqCst) {
println!("d = {}: {} ({:?})", d, res.len(), res);
}
if MAKE_REPORT.load(Ordering::SeqCst) {
writeln_report!("したがって,$h_K = {}$.", res.len());
writeln_report!();
writeln_report!("イデアル類群の代表元は,");
let mut first = true;
for (a, b, _) in res {
if !first {
write_report!(", ");
}
first = false;
if b % 2 == 0 && disc % 4 == 0 {
if b == 0 {
write_report!(r"$\left({}, \sqrt{{ {} }}\right)$", a, disc / 4);
} else {
write_report!(
r"$\left({}, {} + \sqrt{{ {} }}\right)$",
a,
-b / 2,
disc / 4
);
}
} else {
if b == 0 {
write_report!(
r"$\left({}, \frac{{ \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
disc
);
} else {
write_report!(
r"$\left({}, \frac{{ {} + \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
-b,
disc
);
}
}
}
writeln_report!(r".");
}
Ok(())
}
#[derive(Clap)]
struct Opt {
#[clap(short = "r", long, about = "Enables reporting")]
make_report: bool,
start: i64,
end: Option<i64>,
}
fn main() {
let opt = Opt::parse();
if opt.make_report {
MAKE_REPORT.store(true, Ordering::SeqCst);
}
let start = opt.start;
let end = opt.end.unwrap_or(opt.start);
for d in start..=end {
writeln_report!();
writeln_report!(r"\section{{ $K = \mathbb{{Q}}(\sqrt{{ {} }})$ }}", d);
writeln_report!();
let _ = do_main(d);
}
} | if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
); | random_line_split |
main.rs | use clap::Clap;
use itertools::Itertools;
use num::integer::gcd;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
static MAKE_REPORT: AtomicBool = AtomicBool::new(false);
macro_rules! write_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
print!($($args)*);
}
}
);
macro_rules! writeln_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
println!($($args)*);
}
}
);
fn has_square_factor(d: i64) -> Result<(), i64> {
let mut d = d.abs();
assert_ne!(d, 0);
let mut i = 2;
while d != 1 {
if d % i == 0 {
d /= i;
if d % i == 0 {
return Err(i);
}
}
i += 1;
}
Ok(())
}
fn discriminant(d: i64) -> i64 {
let mod4 = (d % 4 + 4) % 4;
writeln_report!(
r"${} \equiv {} \mod 4$であるから,判別式を$D$とすると",
d,
mod4
);
// 負の時の対策
let disc = if mod4 == 1 { d } else { 4 * d };
writeln_report!(r"$D = {}$となる.", disc);
disc
}
fn is_int(x: f64) -> bool {
(x - x.round()).abs() < 1e-8
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_negative(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
// b の範囲を求める (exclusive)
let maxb = {
let sqrt = (disc.abs() as f64 / 3.0).sqrt();
let is_int = is_int(sqrt);
// 1.3 -> 2, 2.9 -> 3, 4.0 -> 5 としたい。
//
// sqrt.floor() + 1.0 でもよいが、 sqrt の精度で整数がわずかに小さい値に
// なって floor で 1 ずれる可能性を心配している。
let maxb = if is_int {
sqrt.round() + 1.0
} else {
sqrt.ceil()
} as i64;
writeln_report!(
r"\[ |b| \leqq \sqrt{{ \frac{{ |{disc}| }}{{ 3 }} }} = \sqrt{{ \frac{{ |{discabs}| }}{{ 3 }} }} {op} {maxb}. \]",
disc = disc,
discabs = disc.abs(),
op = if is_int { r"=" } else { "<" },
maxb = if is_int { maxb - 1 } else { maxb },
);
maxb
};
writeln_report!(
r"$4ac = b^2 + {}$より$b$は{}であるから,",
disc.abs(),
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = (0..maxb)
.filter(|x| x % 2 == disc.abs() % 2)
.flat_map(|x| vec![x, -x])
.dedup()
.collect_vec();
{
let nonzero = bs.iter().filter(|&&x| x > 0);
let has_zero = bs[0] == 0;
if bs.is_empty() {
writeln_report!(r"条件を満たす$b$はない.",);
return Err("no cands; is d = 1?".to_string());
}
if bs.len() == 1 {
writeln_report!(r"条件を満たす$b$は$b = 0$.",);
} else {
writeln_report!(
r"条件を満たす$b$は$b = {}$\pm {}$.",
if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
);
}
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// 条件を満たす a, c を求める.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) をチェックする
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
| a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// 条件 (A) を確認する
// b の候補を得る (exclusive))
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// 本来は d = 1 以外で int になることはないのであまり考える必要はない。
let is_int = is_int(sqrt);
// 1.3 -> -2, -2.9 -> -3, 4.0 -> -4 としたい。
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// a, c を求める
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) を確認する
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if !left { r"\not" } else { "" };
let leftend = if left {
"を満たす."
} else {
"となるので不適."
};
let right = a - b + c > 0;
let rightopnot = if !right { r"\not" } else { "" };
let rightstart = if left && right {
"また"
} else {
"このとき"
};
let rightend = if right {
"を満たす."
} else {
"となるので不適."
};
if !left || (left && right) {
writeln_report!(
r"このとき$a + b + c = {} {:+} {:+} = {} {}< 0${}",
a,
b,
c,
a + b + c,
leftopnot,
leftend
);
}
if !right || (left && right) {
writeln_report!(
r"{}$a - b + c = {} {:+} {:+} = {} {}> 0${}",
rightstart,
a,
-b,
c,
a - b + c,
rightopnot,
rightend
);
}
left && right
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
// 条件 (C) を確認する
let res = remove_same_repeat(disc, &res);
writeln_report!();
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
fn remove_same_repeat(disc: i64, cands: &[(i64, i64, i64)]) -> Vec<(i64, i64, i64)> {
writeln_report!("");
writeln_report!("ここまでで得られた$(a, b, c)$の組は,");
writeln_report!(r"${:?}$.", cands.iter().format("$, $"));
writeln_report!(r"これを連分数展開し,循環節が同じものを除く.");
writeln_report!(r"連分数展開の途中に現れた分数を全て除けば良い.");
let cand_fracs = cands
.iter()
.map(|&(a, b, _)| Frac::from_abd(a, b, disc))
.collect_vec();
let map: HashMap<_, _> = cand_fracs
.iter()
.copied()
.zip(cands.iter().copied())
.collect();
let mut notfound: HashSet<_> = map.keys().collect();
let mut res = Vec::new();
for mut frac in cand_fracs {
if !notfound.contains(&frac) {
continue;
}
writeln_report!();
writeln_report!("${:?}$に対応する${}$を連分数展開する.", map[&frac], frac);
res.push(map[&frac]);
notfound.remove(&frac);
let mut obtained = HashSet::new();
while obtained.insert(frac) && !notfound.is_empty() {
write_report!(r"\[ {} = ", frac);
let int = frac.integer_part();
frac = frac.sub_int(int);
write_report!(r"{} + \left({}\right) = ", int, frac);
frac = frac.invert();
writeln_report!(r"{} + \frac{{ 1 }}{{ {} }}. \]", int, frac);
if notfound.contains(&frac) {
writeln_report!(
"${}$は${:?}$に対応するので,${:?}$は除く.",
frac,
map[&frac],
map[&frac]
);
notfound.remove(&frac);
}
}
if !notfound.is_empty() && obtained.contains(&frac) {
writeln_report!(
"ここで${}$は一度現れたので,この連分数はここから循環する.",
frac
);
}
}
res
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
struct Frac {
num: i64,
coeff: i64,
root: i64,
denom: i64,
}
impl fmt::Display for Frac {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
let coeff = if self.coeff == 1 {
"+".to_string()
} else if self.coeff == -1 {
"-".to_string()
} else {
format!("{:+}", self.coeff)
};
let num = format!(r"{} {}\sqrt{{ {} }}", self.num, coeff, self.root);
let frac = if self.denom == 1 {
num
} else {
format!(r"\frac{{ {} }}{{ {} }}", num, self.denom)
};
write!(b, "{}", frac)
}
}
impl Frac {
pub fn from_abd(a: i64, b: i64, disc: i64) -> Frac {
Frac::new(-b, 1, disc, 2 * a)
}
pub fn new(num: i64, coeff: i64, root: i64, denom: i64) -> Frac {
assert!(root > 0);
let mut f = Frac {
num,
coeff,
root,
denom,
};
f.normalize();
f
}
pub fn normalize(&mut self) {
self.normalize_root();
self.reduce();
if self.denom < 0 {
self.denom *= -1;
self.num *= -1;
self.coeff *= -1;
}
}
pub fn invert(self) -> Frac {
let denom = self.num * self.num - self.coeff * self.coeff * self.root;
let num = self.denom * self.num;
let coeff = -self.denom * self.coeff;
let root = self.root;
let mut res = Frac {
denom,
num,
coeff,
root,
};
res.normalize();
res
}
pub fn integer_part(self) -> i64 {
let num = self.num as f64 + self.coeff as f64 * (self.root as f64).sqrt();
let denom = self.denom as f64;
let float = num / denom;
if is_int(float) {
float.round() as i64
} else {
float.floor() as i64
}
}
pub fn sub_int(mut self, int: i64) -> Frac {
self.num -= int * self.denom;
self.normalize();
self
}
fn normalize_root(&mut self) {
while let Err(d) = has_square_factor(self.root) {
self.root /= d * d;
self.coeff *= d;
}
}
fn reduce(&mut self) {
let g = gcd(gcd(self.num, self.coeff), self.denom);
self.num /= g;
self.coeff /= g;
self.denom /= g;
}
}
#[allow(clippy::collapsible_if)]
fn do_main(d: i64) -> Result<(), String> {
// if d.abs() > 999 {
// return Err(format!("input too large: {}", d));
// }
if d == 0 {
writeln_report!("$d = 0$ のときは考えない.");
return Err("d is zero".to_string());
}
if let Err(f) = has_square_factor(d) {
writeln_report!("$d = {}$は平方因子${}$を持つため,考えない.", d, f);
return Err(format!("{} has square factor: {}", d, f));
}
writeln_report!(r"このとき$d = {}$である.", d);
let disc = discriminant(d);
writeln_report!();
let res = if d < 0 {
calc_negative(disc)?
} else {
calc_positive(disc)?
};
if !MAKE_REPORT.load(Ordering::SeqCst) {
println!("d = {}: {} ({:?})", d, res.len(), res);
}
if MAKE_REPORT.load(Ordering::SeqCst) {
writeln_report!("したがって,$h_K = {}$.", res.len());
writeln_report!();
writeln_report!("イデアル類群の代表元は,");
let mut first = true;
for (a, b, _) in res {
if !first {
write_report!(", ");
}
first = false;
if b % 2 == 0 && disc % 4 == 0 {
if b == 0 {
write_report!(r"$\left({}, \sqrt{{ {} }}\right)$", a, disc / 4);
} else {
write_report!(
r"$\left({}, {} + \sqrt{{ {} }}\right)$",
a,
-b / 2,
disc / 4
);
}
} else {
if b == 0 {
write_report!(
r"$\left({}, \frac{{ \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
disc
);
} else {
write_report!(
r"$\left({}, \frac{{ {} + \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
-b,
disc
);
}
}
}
writeln_report!(r".");
}
Ok(())
}
#[derive(Clap)]
struct Opt {
#[clap(short = "r", long, about = "Enables reporting")]
make_report: bool,
start: i64,
end: Option<i64>,
}
fn main() {
let opt = Opt::parse();
if opt.make_report {
MAKE_REPORT.store(true, Ordering::SeqCst);
}
let start = opt.start;
let end = opt.end.unwrap_or(opt.start);
for d in start..=end {
writeln_report!();
writeln_report!(r"\section{{ $K = \mathbb{{Q}}(\sqrt{{ {} }})$ }}", d);
writeln_report!();
let _ = do_main(d);
}
}
| writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
| conditional_block |
main.rs | use clap::Clap;
use itertools::Itertools;
use num::integer::gcd;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
static MAKE_REPORT: AtomicBool = AtomicBool::new(false);
macro_rules! write_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
print!($($args)*);
}
}
);
macro_rules! writeln_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
println!($($args)*);
}
}
);
fn has_square_factor(d: i64) -> Result<(), i64> {
let mut d = d.abs();
assert_ne!(d, 0);
let mut i = 2;
while d != 1 {
if d % i == 0 {
d /= i;
if d % i == 0 {
return Err(i);
}
}
i += 1;
}
Ok(())
}
fn discriminant(d: i64) -> i64 {
let mod4 = (d % 4 + 4) % 4;
writeln_report!(
r"${} \equiv {} \mod 4$であるから,判別式を$D$とすると",
d,
mod4
);
// 負の時の対策
let disc = if mod4 == 1 { d } else { 4 * d };
writeln_report!(r"$D = {}$となる.", disc);
disc
}
fn is_int(x: f64) -> bool {
(x - x.round()).abs() < 1e-8
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_negative(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
// b の範囲を求める (exclusive)
let maxb = {
let sqrt = (disc.abs() as f64 / 3.0).sqrt();
let is_int = is_int(sqrt);
// 1.3 -> 2, 2.9 -> 3, 4.0 -> 5 としたい。
//
// sqrt.floor() + 1.0 でもよいが、 sqrt の精度で整数がわずかに小さい値に
// なって floor で 1 ずれる可能性を心配している。
let maxb = if is_int {
sqrt.round() + 1.0
} else {
sqrt.ceil()
} as i64;
writeln_report!(
r"\[ |b| \leqq \sqrt{{ \frac{{ |{disc}| }}{{ 3 }} }} = \sqrt{{ \frac{{ |{discabs}| }}{{ 3 }} }} {op} {maxb}. \]",
disc = disc,
discabs = disc.abs(),
op = if is_int { r"=" } else { "<" },
maxb = if is_int { maxb - 1 } else { maxb },
);
maxb
};
writeln_report!(
r"$4ac = b^2 + {}$より$b$は{}であるから,",
disc.abs(),
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = (0..maxb)
.filter(|x| x % 2 == disc.abs() % 2)
.flat_map(|x| vec![x, -x])
.dedup()
.collect_vec();
{
let nonzero = bs.iter().filter(|&&x| x > 0);
let has_zero = bs[0] == 0;
if bs.is_empty() {
writeln_report!(r"条件を満たす$b$はない.",);
return Err("no cands; is d = 1?".to_string());
}
if bs.len() == 1 {
writeln_report!(r"条件を満たす$b$は$b = 0$.",);
} else {
writeln_report!(
r"条件を満たす$b$は$b = {}$\pm {}$.",
if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
);
}
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// 条件を満たす a, c を求める.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) をチェックする
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// 条件 (A) を確認する
// b の候補を得る (exclusive))
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// 本来は d = 1 以外で int になることはないのであまり考える必要はない。
let is_int = is_int(sqrt);
// 1.3 -> -2, -2.9 -> -3, 4.0 -> -4 としたい。
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// a, c を求める
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) を確認する
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if !left { r"\not" } else { "" };
let leftend = if left {
"を満たす."
} else {
"となるので不適."
};
let right = a - b + c > 0;
let rightopnot = if !right { r"\not" } else { "" };
let rightstart = if left && right {
"また"
} else {
"このとき"
};
let rightend = if right {
"を満たす."
} else {
"となるので不適."
};
if !left || (left && right) {
writeln_report!(
r"このとき$a + b + c = {} {:+} {:+} = {} {}< 0${}",
a,
b,
c,
a + b + c,
leftopnot,
leftend
);
}
if !right || (left && right) {
writeln_report!(
r"{}$a - b + c = {} {:+} {:+} = {} {}> 0${}",
rightstart,
a,
-b,
c,
a - b + c,
rightopnot,
rightend
);
}
left && right
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
// 条件 (C) を確認する
let res = remove_same_repeat(disc, &res);
writeln_report!();
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
fn remove_same_repeat(disc: i64, cands: &[(i64, i64, i64)]) -> Vec<(i64, i64, i64)> {
writeln_report!("");
writeln_report!("ここまでで得られた$(a, b, c)$の組は,");
writeln_report!(r"${:?}$.", cands.iter().format("$, $"));
writeln_report!(r"これを連分数展開し,循環節が同じものを除く.");
writeln_report!(r"連分数展開の途中に現れた分数を全て除けば良い.");
let cand_fracs = cands
.iter()
.map(|&(a, b, _)| Frac::from_abd(a, b, disc))
.collect_vec();
let map: HashMap<_, _> = cand_fracs
.iter()
.copied()
.zip(cands.iter().copied())
.collect();
let mut notfound: HashSet<_> = map.keys().collect();
let mut res = Vec::new();
for mut frac in cand_fracs {
if !notfound.contains(&frac) {
continue;
}
writeln_report!();
writeln_report!("${:?}$に対応する${}$を連分数展開する.", map[&frac], frac);
res.push(map[&frac]);
notfound.remove(&frac);
let mut obtained = HashSet::new();
while obtained.insert(frac) && !notfound.is_empty() {
write_report!(r"\[ {} = ", frac);
let int = frac.integer_part();
frac = frac.sub_int(int);
write_report!(r"{} + \left({}\right) = ", int, frac);
frac = frac.invert();
writeln_report!(r"{} + \frac{{ 1 }}{{ {} }}. \]", int, frac);
if notfound.contains(&frac) {
writeln_report!(
"${}$は${:?}$に対応するので,${:?}$は除く.",
frac,
map[&frac],
map[&frac]
);
notfound.remove(&frac);
}
}
if !notfound.is_empty() && obtained.contains(&frac) {
writeln_report!(
"ここで${}$は一度現れたので,この連分数はここから循環する.",
frac
);
}
}
res
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
struct Frac {
num: i64,
coeff: i64,
root: i64,
denom: i64,
}
impl fmt::Display for Frac {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
let coeff = if self.coeff == 1 {
"+".to_string()
} else if self.coeff == -1 {
"-".to_string()
} else {
format!("{:+}", self.coeff)
};
let num = format!(r"{} {}\sqrt{{ {} }}", self.num, coeff, self.root);
let frac = if self.denom == 1 {
num
} else {
format!(r"\frac{{ {} }}{{ {} }}", num, self.denom)
};
write!(b, "{}", frac)
}
}
impl Frac {
pub fn from_abd(a: i64, b: i64, disc: i64) -> Frac {
Frac::new(-b, 1, disc, 2 * a)
}
pub fn new(num: i64, coeff: i64, root: i64, denom: i64) -> Frac {
assert!(root > 0);
let mut f = Frac {
num,
coeff,
root,
denom,
};
f.normalize();
f
}
pub fn normalize(&mut self) {
self.normalize_root();
self.reduce();
if self.denom < 0 {
self.denom *= -1;
self.num *= -1;
self.coeff *= -1;
}
}
pub fn invert(self) -> Frac {
let denom = self.num * self.num - self.coeff * self.coeff * self.root;
let num = self.denom * self.num;
let coeff = -self.denom * self.coeff;
let root = self.root;
let mut res = Frac {
denom,
num,
coeff,
root,
};
res.normalize();
res
}
pub fn integer_part(self) -> i64 {
let num = self.num as f64 + self.coeff as f64 * (self.root as f64).sqrt();
let denom = self.denom as f64;
let float = num / denom;
if is_int(float) {
float.round() as i64
} else {
float.floor() as i64
}
}
pub fn sub_int(mut self, int: i64) -> Frac {
self.num -= int * self.denom;
self.normalize();
self
}
fn normalize_root(&mut self) {
while let Err(d) = has_square_factor(self.root) {
self.root /= d * d;
self.coeff *= d;
}
}
fn reduce(&mut self) {
let g = gcd(gcd(self.num, self.coeff), self.denom);
self.num /= g;
self.coeff /= g;
self.denom /= g;
}
}
#[allow(clippy::collapsible_if)]
fn do_main(d: i64) -> Result<(), String> {
// if d.abs() > 999 {
// return Err(format!("input too large: {}", d));
// }
if d == 0 {
writeln_report!("$d = 0$ のときは考えない.");
return Err("d is zero".to_string());
}
if let Err(f) = has_square_factor(d) {
writeln_report!("$d = {}$は平方因子${}$を持つため,考えない.", d, f);
return Err(format!("{} has square factor: {}", d, f));
}
writeln_report!(r"このとき$d = {}$である.", d);
let disc = discriminant(d);
writeln_report!();
let res = if d < 0 {
calc_negative(disc)?
} else {
calc_positive(disc)?
};
if !MAKE_REPORT.load(Ordering::SeqCst) {
println!("d = {}: {} ({:?})", d, res.len(), res);
}
if MAKE_REPORT.load(Ordering::SeqCst) {
writeln_report!("したがって,$h_K = {}$.", res.len());
writeln_report!();
writeln_report!("イデアル類群の代表元は,");
let mut first = t | for (a, b, _) in res {
if !first {
write_report!(", ");
}
first = false;
if b % 2 == 0 && disc % 4 == 0 {
if b == 0 {
write_report!(r"$\left({}, \sqrt{{ {} }}\right)$", a, disc / 4);
} else {
write_report!(
r"$\left({}, {} + \sqrt{{ {} }}\right)$",
a,
-b / 2,
disc / 4
);
}
} else {
if b == 0 {
write_report!(
r"$\left({}, \frac{{ \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
disc
);
} else {
write_report!(
r"$\left({}, \frac{{ {} + \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
-b,
disc
);
}
}
}
writeln_report!(r".");
}
Ok(())
}
#[derive(Clap)]
struct Opt {
#[clap(short = "r", long, about = "Enables reporting")]
make_report: bool,
start: i64,
end: Option<i64>,
}
fn main() {
let opt = Opt::parse();
if opt.make_report {
MAKE_REPORT.store(true, Ordering::SeqCst);
}
let start = opt.start;
let end = opt.end.unwrap_or(opt.start);
for d in start..=end {
writeln_report!();
writeln_report!(r"\section{{ $K = \mathbb{{Q}}(\sqrt{{ {} }})$ }}", d);
writeln_report!();
let _ = do_main(d);
}
}
| rue;
| identifier_name |
main.rs | use clap::Clap;
use itertools::Itertools;
use num::integer::gcd;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
static MAKE_REPORT: AtomicBool = AtomicBool::new(false);
macro_rules! write_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
print!($($args)*);
}
}
);
macro_rules! writeln_report(
($($args:tt)*) => {
if MAKE_REPORT.load(Ordering::SeqCst) {
println!($($args)*);
}
}
);
fn has_square_factor(d: i64) -> Result<(), i64> {
let mut d = d.abs();
assert_ne!(d, 0);
let mut i = 2;
while d != 1 {
if d % i == 0 {
d /= i;
if d % i == 0 {
return Err(i);
}
}
i += 1;
}
Ok(())
}
fn discriminant(d: i64) -> i64 {
let mod4 = (d % 4 + 4) % 4;
writeln_report!(
r"${} \equiv {} \mod 4$であるから,判別式を$D$とすると",
d,
mod4
);
// 負の時の対策
let disc = if mod4 == 1 { d } else { 4 * d };
writeln_report!(r"$D = {}$となる.", disc);
disc
}
fn is_int(x: f64) -> bool {
(x - x.round()).abs() < 1e-8
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_negative(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
// b の範囲を求める (exclusive)
let maxb = {
let sqrt = (disc.abs() as f64 / 3.0).sqrt();
let is_int = is_int(sqrt);
// 1.3 -> 2, 2.9 -> 3, 4.0 -> 5 としたい。
//
// sqrt.floor() + 1.0 でもよいが、 sqrt の精度で整数がわずかに小さい値に
// なって floor で 1 ずれる可能性を心配している。
let maxb = if is_int {
sqrt.round() + 1.0
} else {
sqrt.ceil()
} as i64;
writeln_report!(
r"\[ |b| \leqq \sqrt{{ \frac{{ |{disc}| }}{{ 3 }} }} = \sqrt{{ \frac{{ |{discabs}| }}{{ 3 }} }} {op} {maxb}. \]",
disc = disc,
discabs = disc.abs(),
op = if is_int { r"=" } else { "<" },
maxb = if is_int { maxb - 1 } else { maxb },
);
maxb
};
writeln_report!(
r"$4ac = b^2 + {}$より$b$は{}であるから,",
disc.abs(),
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = (0..maxb)
.filter(|x| x % 2 == disc.abs() % 2)
.flat_map(|x| vec![x, -x])
.dedup()
.collect_vec();
{
let nonzero = bs.iter().filter(|&&x| x > 0);
let has_zero = bs[0] == 0;
if bs.is_empty() {
writeln_report!(r"条件を満たす$b$はない.",);
return Err("no cands; is d = 1?".to_string());
}
if bs.len() == 1 {
writeln_report!(r"条件を満たす$b$は$b = 0$.",);
} else {
writeln_report!(
r"条件を満たす$b$は$b = {}$\pm {}$.",
if has_zero { "0$, " } else { "$" },
nonzero.format(r"$, $\pm ")
);
}
}
writeln_report!();
writeln_report!(r"その上で$a \leqq c, c > 0$となるような$a, c$を求める.");
writeln_report!(r"\begin{{itemize}}");
// 条件を満たす a, c を求める.
let mut res = Vec::new();
for b in bs {
let do_report = b >= 0;
if do_report {
writeln_report!(
r"\item $b = {}{}$のとき \\",
if b != 0 { r"\pm " } else { "" },
b
);
}
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
if do_report {
writeln_report!(r"$4ac = {}$となり,これは整数解を持たない.", ac4);
}
continue;
}
let ac = ac4 / 4;
if do_report {
writeln_report!(r"$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!(r"よって$(a, c) = $");
}
let mut first = true;
for a in -ac..=ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
if a <= c && c > 0 {
if do_report {
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
}
res.push((a, b, c));
}
}
if do_report {
writeln_report!(".");
}
}
writeln_report!(r"\end{{itemize}}");
res.sort();
res.dedup();
res.sort_by_key(|&(a, b, c)| (a.abs(), b.abs(), c.abs()));
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// 条件 (B) をチェックする
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = -a < b && b <= a && a < c;
let right = 0 <= b && b <= a && a == c;
if left {
writeln_report!(
r"これは左側の不等式${} < {} \leqq {} < {}$を満たす.",
-a,
b,
a,
c
);
return true;
}
if right {
writeln_report!(
r"これは右側の不等式$0 \leqq {} \leqq {} = {}$満たす.",
b,
a,
c
);
return true;
}
let left_failure = if !(-a < b) {
format!(r"$-a < b$について${} \not< {}$", -a, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a < c) {
format!(r"$a < c$について${} \not< {}$", a, c)
} else {
unreachable!()
};
let right_failure = if !(0 <= b) {
format!(r"$0 \leqq b$について${} \not\leqq {}$", 0, b)
} else if !(b <= a) {
format!(r"$b \leqq a$について${} \not\leqq {}$", b, a)
} else if !(a == c) {
format!(r"$a = c$について${} \neq {}$", a, c)
} else {
unreachable!()
};
writeln_report!("この組は左側の不等式では{}であり,右側の不等式では{}であるから,両方の不等式を満たさない.", left_failure, right_failure);
false
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
#[allow(clippy::cognitive_complexity, clippy::nonminimal_bool)]
fn calc_positive(disc: i64) -> Result<Vec<(i64, i64, i64)>, String> {
assert!(disc > 0);
// 条件 (A) を確認する
// b の候補を得る (exclusive))
writeln_report!("まず,条件を満たす$b$の候補を計算する.$b$の範囲は");
let minb = {
let sqrt = (disc as f64).sqrt();
// 本来は d = 1 以外で int になることはないのであまり考える必要はない。
let is_int = is_int(sqrt);
// 1.3 -> -2, -2.9 -> -3, 4.0 -> -4 としたい。
let minb = if is_int { -sqrt.round() } else { -sqrt.ceil() } as i64;
writeln_report!(
r"\[ 0 > b > -\sqrt{{ {disc} }} {op} {minb}. \]",
disc = disc,
op = if is_int { "=" } else { ">" },
minb = if is_int { minb - 1 } else { minb },
);
minb
};
writeln_report!(
r"$4ac = b^2 - {}$より$ | t) = ", int, frac);
frac = frac.invert();
writeln_report!(r"{} + \frac{{ 1 }}{{ {} }}. \]", int, frac);
if notfound.contains(&frac) {
writeln_report!(
"${}$は${:?}$に対応するので,${:?}$は除く.",
frac,
map[&frac],
map[&frac]
);
notfound.remove(&frac);
}
}
if !notfound.is_empty() && obtained.contains(&frac) {
writeln_report!(
"ここで${}$は一度現れたので,この連分数はここから循環する.",
frac
);
}
}
res
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
struct Frac {
num: i64,
coeff: i64,
root: i64,
denom: i64,
}
impl fmt::Display for Frac {
fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result {
let coeff = if self.coeff == 1 {
"+".to_string()
} else if self.coeff == -1 {
"-".to_string()
} else {
format!("{:+}", self.coeff)
};
let num = format!(r"{} {}\sqrt{{ {} }}", self.num, coeff, self.root);
let frac = if self.denom == 1 {
num
} else {
format!(r"\frac{{ {} }}{{ {} }}", num, self.denom)
};
write!(b, "{}", frac)
}
}
impl Frac {
pub fn from_abd(a: i64, b: i64, disc: i64) -> Frac {
Frac::new(-b, 1, disc, 2 * a)
}
pub fn new(num: i64, coeff: i64, root: i64, denom: i64) -> Frac {
assert!(root > 0);
let mut f = Frac {
num,
coeff,
root,
denom,
};
f.normalize();
f
}
pub fn normalize(&mut self) {
self.normalize_root();
self.reduce();
if self.denom < 0 {
self.denom *= -1;
self.num *= -1;
self.coeff *= -1;
}
}
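/// Invert x = (num + coeff*sqrt(root)) / denom by rationalizing the denominator:
/// 1/x = denom * (num - coeff*sqrt(root)) / (num^2 - coeff^2 * root), which is exactly
/// what the assignments below compute; `normalize()` then reduces the result and
/// restores the positive-denominator convention.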
pub fn invert(self) -> Frac {
let denom = self.num * self.num - self.coeff * self.coeff * self.root;
let num = self.denom * self.num;
let coeff = -self.denom * self.coeff;
let root = self.root;
let mut res = Frac {
denom,
num,
coeff,
root,
};
res.normalize();
res
}
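/// Floor of the real value (num + coeff*sqrt(root)) / denom, evaluated in f64 (with a
/// tolerance so near-integers round instead of truncating). Together with `sub_int` and
/// `invert` this drives the continued-fraction step a = floor(x), x' = 1 / (x - a).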
pub fn integer_part(self) -> i64 {
let num = self.num as f64 + self.coeff as f64 * (self.root as f64).sqrt();
let denom = self.denom as f64;
let float = num / denom;
if is_int(float) {
float.round() as i64
} else {
float.floor() as i64
}
}
pub fn sub_int(mut self, int: i64) -> Frac {
self.num -= int * self.denom;
self.normalize();
self
}
fn normalize_root(&mut self) {
while let Err(d) = has_square_factor(self.root) {
self.root /= d * d;
self.coeff *= d;
}
}
fn reduce(&mut self) {
let g = gcd(gcd(self.num, self.coeff), self.denom);
self.num /= g;
self.coeff /= g;
self.denom /= g;
}
}
#[allow(clippy::collapsible_if)]
fn do_main(d: i64) -> Result<(), String> {
// if d.abs() > 999 {
// return Err(format!("input too large: {}", d));
// }
if d == 0 {
writeln_report!("$d = 0$ のときは考えない.");
return Err("d is zero".to_string());
}
if let Err(f) = has_square_factor(d) {
writeln_report!("$d = {}$は平方因子${}$を持つため,考えない.", d, f);
return Err(format!("{} has square factor: {}", d, f));
}
writeln_report!(r"このとき$d = {}$である.", d);
let disc = discriminant(d);
writeln_report!();
let res = if d < 0 {
calc_negative(disc)?
} else {
calc_positive(disc)?
};
if !MAKE_REPORT.load(Ordering::SeqCst) {
println!("d = {}: {} ({:?})", d, res.len(), res);
}
if MAKE_REPORT.load(Ordering::SeqCst) {
writeln_report!("したがって,$h_K = {}$.", res.len());
writeln_report!();
writeln_report!("イデアル類群の代表元は,");
let mut first = true;
for (a, b, _) in res {
if !first {
write_report!(", ");
}
first = false;
if b % 2 == 0 && disc % 4 == 0 {
if b == 0 {
write_report!(r"$\left({}, \sqrt{{ {} }}\right)$", a, disc / 4);
} else {
write_report!(
r"$\left({}, {} + \sqrt{{ {} }}\right)$",
a,
-b / 2,
disc / 4
);
}
} else {
if b == 0 {
write_report!(
r"$\left({}, \frac{{ \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
disc
);
} else {
write_report!(
r"$\left({}, \frac{{ {} + \sqrt{{ {} }} }}{{ 2 }}\right)$",
a,
-b,
disc
);
}
}
}
writeln_report!(r".");
}
Ok(())
}
#[derive(Clap)]
struct Opt {
#[clap(short = "r", long, about = "Enables reporting")]
make_report: bool,
start: i64,
end: Option<i64>,
}
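// Hypothetical invocation (the binary name comes from the Cargo package and is only
// illustrative here): `class_number -r 2 100` generates the LaTeX report for every
// 2 <= d <= 100, while `class_number 5` just prints the class number of Q(sqrt(5)).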
fn main() {
let opt = Opt::parse();
if opt.make_report {
MAKE_REPORT.store(true, Ordering::SeqCst);
}
let start = opt.start;
let end = opt.end.unwrap_or(opt.start);
for d in start..=end {
writeln_report!();
writeln_report!(r"\section{{ $K = \mathbb{{Q}}(\sqrt{{ {} }})$ }}", d);
writeln_report!();
let _ = do_main(d);
}
}
| b$は{}であるから,",
disc,
if disc % 2 == 0 { "偶数" } else { "奇数" }
);
let bs = ((minb + 1)..0).filter(|x| x.abs() % 2 == disc % 2);
if bs.clone().collect_vec().is_empty() {
writeln_report!(r"条件を満たす$b$はない.");
return Err("no cands".to_string());
}
writeln_report!(r"条件を満たす$b$は$b = $ ${}$.", bs.clone().format("$, $"));
// Find a and c.
writeln_report!();
writeln_report!("その上で$a > 0, c < 0$となる$a, c$を求める.");
let mut res = Vec::new();
writeln_report!(r"\begin{{itemize}}");
for b in bs {
writeln_report!(r"\item $b = {}$のとき \\", b);
let ac4 = b * b - disc;
if ac4 % 4 != 0 {
writeln_report!("$4ac = {}$となり,これは整数解を持たない.", ac4);
continue;
}
let ac = ac4 / 4;
writeln_report!("$4ac = {}$より$ac = {}$.", ac4, ac);
write_report!("よって$(a, c) = $");
let mut first = true;
for a in 0..=-ac {
if a == 0 || ac % a != 0 {
continue;
}
let c = ac / a;
assert!(c < 0);
write_report!("{}$({}, {})$", if first { "" } else { ", " }, a, c);
first = false;
res.push((a, b, c));
}
writeln_report!(".");
}
writeln_report!(r"\end{{itemize}}");
writeln_report!(r"以上により,ここまでの条件を満たす$(a, b, c)$の組は");
writeln_report!(r"$(a, b, c) = $ ${:?}$.", res.iter().format("$, $"));
// Check condition (B).
fn cond(&(a, b, c): &(i64, i64, i64)) -> bool {
writeln_report!(r"\item $(a, b, c) = ({}, {}, {})$のとき \\", a, b, c);
let g = gcd(gcd(a, b), c);
if g != 1 {
writeln_report!("最大公約数が${}$となるので不適.", g);
return false;
}
let left = a + b + c < 0;
let leftopnot = if !left { r"\not" } else { "" };
let leftend = if left {
"を満たす."
} else {
"となるので不適."
};
let right = a - b + c > 0;
let rightopnot = if !right { r"\not" } else { "" };
let rightstart = if left && right {
"また"
} else {
"このとき"
};
let rightend = if right {
"を満たす."
} else {
"となるので不適."
};
if !left || (left && right) {
writeln_report!(
r"このとき$a + b + c = {} {:+} {:+} = {} {}< 0${}",
a,
b,
c,
a + b + c,
leftopnot,
leftend
);
}
if !right || (left && right) {
writeln_report!(
r"{}$a - b + c = {} {:+} {:+} = {} {}> 0${}",
rightstart,
a,
-b,
c,
a - b + c,
rightopnot,
rightend
);
}
left && right
}
writeln_report!(r"\begin{{itemize}}");
res.retain(cond);
writeln_report!(r"\end{{itemize}}");
// Check condition (C).
let res = remove_same_repeat(disc, &res);
writeln_report!();
writeln_report!(
"以上により,全ての条件を満たす$(a, b, c)$の組は${:?}$となる.",
res.iter().format("$, $")
);
Ok(res)
}
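/// Condition (C): each candidate (a, b, c) is mapped to x = (-b + sqrt(disc)) / (2a)
/// via `Frac::from_abd` and expanded as a continued fraction; any other candidate whose
/// fraction shows up during the expansion lies on the same cycle and is dropped, leaving
/// one representative per cycle.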
fn remove_same_repeat(disc: i64, cands: &[(i64, i64, i64)]) -> Vec<(i64, i64, i64)> {
writeln_report!("");
writeln_report!("ここまでで得られた$(a, b, c)$の組は,");
writeln_report!(r"${:?}$.", cands.iter().format("$, $"));
writeln_report!(r"これを連分数展開し,循環節が同じものを除く.");
writeln_report!(r"連分数展開の途中に現れた分数を全て除けば良い.");
let cand_fracs = cands
.iter()
.map(|&(a, b, _)| Frac::from_abd(a, b, disc))
.collect_vec();
let map: HashMap<_, _> = cand_fracs
.iter()
.copied()
.zip(cands.iter().copied())
.collect();
let mut notfound: HashSet<_> = map.keys().collect();
let mut res = Vec::new();
for mut frac in cand_fracs {
if !notfound.contains(&frac) {
continue;
}
writeln_report!();
writeln_report!("${:?}$に対応する${}$を連分数展開する.", map[&frac], frac);
res.push(map[&frac]);
notfound.remove(&frac);
let mut obtained = HashSet::new();
while obtained.insert(frac) && !notfound.is_empty() {
write_report!(r"\[ {} = ", frac);
let int = frac.integer_part();
frac = frac.sub_int(int);
write_report!(r"{} + \left({}\righ | identifier_body |
csr.rs | /*
Copyright 2020 Brandon Lucia <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate bit_vec;
extern crate csv;
extern crate rand;
use bit_vec::BitVec;
use memmap2::{MmapMut,Mmap};
use rand::Rng;
use rayon::prelude::*;
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use byte_slice_cast::*;
#[derive(Debug)]
pub struct CSR {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] {
&mut self.vtxprop
}
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns only the edge list (the vertex count is the caller-supplied numv);
/// el.len() is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
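///
/// A minimal usage sketch (the crate name `csr` and the file `edges.csv` are assumed
/// for illustration only):
///
/// ```ignore
/// use csr::CSR;
/// let (numv, el) = CSR::el_from_file("edges.csv");
/// let graph = CSR::new(numv, el);
/// assert_eq!(graph.get_e(), graph.get_neighbs().len());
/// ```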
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
}); |
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[(e).0 % 16].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
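///
/// For the layout sketched in `new` (offsets = |0,3,5,6,9|, e = 9), vertex 1 maps to
/// (3, 5), i.e. its neighbors live in neighbs[3..5]; the last vertex's range ends at e.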
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
/// read_only_scan is a read only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () closure to apply to each edge (source, target)
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A vertex i's offsets in neighbs array are offsets[i] to offsets[i+1]*/
let (i_start, i_end) = self.vtx_offset_range(i);
/*Traverse vertex i's neighbs and call provided f(...) on the edge*/
for ei in i_start..i_end {
let v1 = self.neighbs[ei];
f(i, v1);
}
}
}
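/// Serialize the graph to a memory-mapped file. The on-disk layout is
/// [offsets.len() as 8 LE bytes][neighbs.len() as 8 LE bytes][offsets words][neighbs words],
/// i.e. (2 + v + e) machine words; the arrays are written as raw `usize` words, so the
/// file is only portable across machines with the same word size and byte order.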
pub fn write_fastcsr(&self, s: String) {
let path = PathBuf::from(s);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
file.set_len((self.offsets.len() + self.neighbs.len() + 2) as u64 * 8)
.unwrap();
let mmap = unsafe { MmapMut::map_mut(&file) };
let offsets_bytes = unsafe { self.offsets.align_to::<u8>().1 };
let neighbs_bytes = unsafe { self.neighbs.align_to::<u8>().1 };
mmap.unwrap().copy_from_slice(
&[
&self.offsets.len().to_le_bytes(),
&self.neighbs.len().to_le_bytes(),
offsets_bytes,
neighbs_bytes,
]
.concat(),
);
}
/// bfs_traversal starts from vertex start and does a breadth first search
/// traversal on the vertices, applying f, the closure passed in, to each
/// vertex
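///
/// Illustrative sketch (builds a small random graph; `ignore`d because it is not a
/// deterministic doctest):
///
/// ```ignore
/// let graph = CSR::new(10, CSR::random_el(10, 3));
/// let mut order = Vec::new();
/// graph.bfs_traversal(0, |v| order.push(v));
/// // `order` now holds the vertices reachable from 0 in breadth-first order.
/// ```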
pub fn bfs_traversal(&self, start: usize, mut f: impl FnMut(usize) -> ()) {
let mut visited = BitVec::from_elem(self.v, false);
let mut q = Vec::new();
visited.set(start, true);
q.push(start);
while q.len() > 0 {
let v = q.remove(0);
f(v);
let (st, en) = self.vtx_offset_range(v);
for nei in st..en {
/*Get the first element of the edge, which is the distal vertex*/
let ne = self.neighbs[nei] as usize;
if !visited[ne] {
visited.set(ne, true);
q.push(ne);
}
}
}
}
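/// par_scan applies `f(vertex, neighbor_slice)` to every vertex in parallel, splitting the
/// vertex range into roughly `par_level` chunks, and stores each returned f64 in `vtxprop`.
/// Sketch of an out-degree computation (illustrative only):
///
/// ```ignore
/// let mut graph = CSR::new(100, CSR::random_el(100, 5));
/// graph.par_scan(16, |_v, neighbors| neighbors.len() as f64);
/// let degrees = graph.get_vtxprop(); // one f64 per vertex
/// ```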
pub fn par_scan(
&mut self,
par_level: usize,
f: impl Fn(usize, &[usize]) -> f64 + std::marker::Sync,
) -> () {
/*basically the number of threads to use*/
let chunksz: usize = if self.v > par_level {
self.v / par_level
} else {
1
};
let scan_vtx_row = |(row_i, vtx_row): (usize, &mut [f64])| {
let row_i_base: usize = row_i * chunksz;
vtx_row
.iter_mut()
.enumerate()
.for_each(|(ii, v): (usize, &mut f64)| {
let v0 = row_i_base + ii;
let (start, end) = self.vtx_offset_range(v0);
*v = f(v0, &self.neighbs[start..end]);
});
};
let mut vtxprop = vec![0.0; self.get_v()];
vtxprop
.par_chunks_mut(chunksz)
.enumerate()
.for_each(scan_vtx_row);
self.vtxprop.copy_from_slice(&vtxprop);
}
} /*impl CSR*/ |
/*return the graph, g*/
g | random_line_split |
csr.rs | /*
Copyright 2020 Brandon Lucia <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate bit_vec;
extern crate csv;
extern crate rand;
use bit_vec::BitVec;
use memmap2::{MmapMut,Mmap};
use rand::Rng;
use rayon::prelude::*;
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use byte_slice_cast::*;
#[derive(Debug)]
pub struct | {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] {
&mut self.vtxprop
}
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns only the edge list (the vertex count is the caller-supplied numv);
/// el.len() is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
});
/*return the graph, g*/
g
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[(e).0 % 16].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
/// read_only_scan is a read only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () closure to apply to each edge (source, target)
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A vertex i's offsets in neighbs array are offsets[i] to offsets[i+1]*/
let (i_start, i_end) = self.vtx_offset_range(i);
/*Traverse vertex i's neighbs and call provided f(...) on the edge*/
for ei in i_start..i_end {
let e = self.neighbs[ei];
match e {
v1 => {
f(i, v1);
}
}
}
}
}
pub fn write_fastcsr(&self, s: String) {
let path = PathBuf::from(s);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
file.set_len((self.offsets.len() + self.neighbs.len() + 2) as u64 * 8)
.unwrap();
let mmap = unsafe { MmapMut::map_mut(&file) };
let offsets_bytes = unsafe { self.offsets.align_to::<u8>().1 };
let neighbs_bytes = unsafe { self.neighbs.align_to::<u8>().1 };
mmap.unwrap().copy_from_slice(
&[
&self.offsets.len().to_le_bytes(),
&self.neighbs.len().to_le_bytes(),
offsets_bytes,
neighbs_bytes,
]
.concat(),
);
}
/// bfs_traversal starts from vertex start and does a breadth first search
/// traversal on the vertices, applying f, the closure passed in, to each
/// vertex
pub fn bfs_traversal(&self, start: usize, mut f: impl FnMut(usize) -> ()) {
let mut visited = BitVec::from_elem(self.v, false);
let mut q = Vec::new();
visited.set(start, true);
q.push(start);
while q.len() > 0 {
let v = q.remove(0);
f(v);
let (st, en) = self.vtx_offset_range(v);
for nei in st..en {
/*Get the first element of the edge, which is the distal vertex*/
let ne = self.neighbs[nei] as usize;
match visited[ne] {
false => {
visited.set(ne, true);
q.push(ne as usize);
}
_ => (),
}
}
}
}
pub fn par_scan(
&mut self,
par_level: usize,
f: impl Fn(usize, &[usize]) -> f64 + std::marker::Sync,
) -> () {
/*basically the number of threads to use*/
let chunksz: usize = if self.v > par_level {
self.v / par_level
} else {
1
};
let scan_vtx_row = |(row_i, vtx_row): (usize, &mut [f64])| {
let row_i_base: usize = row_i * chunksz;
vtx_row
.iter_mut()
.enumerate()
.for_each(|(ii, v): (usize, &mut f64)| {
let v0 = row_i_base + ii;
let (start, end) = self.vtx_offset_range(v0);
*v = f(v0, &self.neighbs[start..end]);
});
};
let mut vtxprop = vec![0.0; self.get_v()];
vtxprop
.par_chunks_mut(chunksz)
.enumerate()
.for_each(scan_vtx_row);
self.vtxprop.copy_from_slice(&vtxprop);
}
} /*impl CSR*/
| CSR | identifier_name |
csr.rs | /*
Copyright 2020 Brandon Lucia <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
extern crate bit_vec;
extern crate csv;
extern crate rand;
use bit_vec::BitVec;
use memmap2::{MmapMut,Mmap};
use rand::Rng;
use rayon::prelude::*;
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use byte_slice_cast::*;
#[derive(Debug)]
pub struct CSR {
v: usize,
e: usize,
vtxprop: Vec<f64>,
offsets: Vec<usize>,
neighbs: Vec<usize>,
}
impl CSR {
pub fn get_vtxprop(&self) -> &[f64] {
&self.vtxprop
}
pub fn get_mut_vtxprop(&mut self) -> &mut [f64] |
pub fn get_v(&self) -> usize {
self.v
}
pub fn get_e(&self) -> usize {
self.e
}
pub fn get_offsets(&self) -> &Vec<usize> {
&self.offsets
}
pub fn get_neighbs(&self) -> &[usize] {
&self.neighbs
}
/// Build a random edge list
/// This method returns only the edge list (the vertex count is the caller-supplied numv);
/// el.len() is the number of edges.
pub fn random_el(numv: usize, maxe: usize) -> Vec<(usize, usize)> {
let mut rng = rand::thread_rng();
let mut el: Vec<(usize, usize)> = Vec::new();
for i in 0..numv {
/*edges per vertex*/
let num_e: usize = rng.gen_range(0, maxe) as usize;
for _ in 0..num_e {
let edge = (i as usize, rng.gen_range(0, numv) as usize);
el.push(edge);
}
}
el
}
/// Build an edge list from a file containing text describing one.
/// The file format is line oriented and human readable:
/// v0,v1
/// v0,v2
/// v0,v3
/// v0,v3
/// v1,v2
/// v1,v2
/// v2,v3
/// v3,v1
/// ...
///
/// This method returns a tuple of the number of vertices seen and the edge list
/// el.len() is the number of edges.
pub fn el_from_file(path: &str) -> (usize, Vec<(usize, usize)>) {
let mut el: Vec<(usize, usize)> = Vec::new();
let mut maxv: usize = 0;
let f = File::open(path);
match f {
Ok(file) => {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(file);
for result in rdr.records() {
match result {
Ok(p) => {
let v0 = p.get(0).unwrap().parse::<usize>().unwrap();
let v1 = p.get(1).unwrap().parse::<usize>().unwrap();
if v0 > maxv {
maxv = v0
}
if v1 > maxv {
maxv = v1
}
el.push((v0, v1));
}
_ => {
eprintln!("Failed to parse file");
}
}
}
}
_ => {
eprintln!("Failed to open file {}", path);
}
}
(maxv + 1, el)
}
pub fn new_from_el_mmap(v: usize, f: String) -> CSR {
let path = PathBuf::from(f);
let file = OpenOptions::new()
.read(true)
.open(&path)
.unwrap();
let mmap = unsafe { Mmap::map(&file).unwrap() };
let el = mmap[..]
.as_slice_of::<usize>()
.unwrap();
let mut ncnt = Vec::with_capacity(v);
for _ in 0..v {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.chunks(2).par_bridge().for_each(|e| {
ncnt[ e[0] ].fetch_add(1, Ordering::SeqCst);
});
let mut work_offsets = Vec::with_capacity(v);
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: v,
e: el.chunks(2).len(),
vtxprop: vec![0f64; v],
offsets: vec![0; v],
neighbs: vec![0; el.chunks(2).len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::with_capacity(el.chunks(2).len());
for _ in 0..el.chunks(2).len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.chunks(2).par_bridge().for_each(|e| {
let cur_ind = work_offsets[e[0]].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(e[1], Ordering::Relaxed);
});
g.neighbs.par_iter_mut().enumerate().for_each(|(i,e)| {
*e = nbs[i].load(Ordering::Relaxed);
});
/*return the graph, g*/
g
}
/// Take an edge list in and produce a CSR out
/// (u,v)
pub fn new(numv: usize, ref el: Vec<(usize, usize)>) -> CSR {
const NUMCHUNKS: usize = 16;
let chunksz: usize = if numv > NUMCHUNKS {
numv / NUMCHUNKS
} else {
1
};
/*TODO: Parameter*/
let numbins = 16;
let mut ncnt = Vec::new();
for _ in 0..numv {
ncnt.push(AtomicUsize::new(0));
}
/*Count up the number of neighbors that each vertex has */
el.par_chunks(chunksz).for_each(|cnk| {
/*Per-thread bin structure*/
let mut bins = Vec::new();
for _ in 0..numbins {
bins.push(Vec::<&(usize, usize)>::new());
}
/*iterate over chunk, push edges to bins*/
cnk.iter().for_each(|e| {
bins[(e).0 % 16].push(e);
});
bins.iter().for_each(|b| {
b.iter().for_each(|e| {
ncnt[(e).0].fetch_add(1, Ordering::SeqCst);
});
});
});
let mut work_offsets = Vec::new();
work_offsets.push(AtomicUsize::new(0));
let mut g = CSR {
v: numv,
e: el.len(),
vtxprop: vec![0f64; numv],
offsets: vec![0; numv],
neighbs: vec![0; el.len()],
};
/* CSR Structure e.g.,
|0,3,5,6,9|
|v2,v3,v5|v1,v9|v2|v3,v7,v8|x|
*/
/*vertex i's offset is vtx i-1's offset + i's neighbor count*/
for i in 1..ncnt.len() {
g.offsets[i] = g.offsets[i - 1] + ncnt[i - 1].load(Ordering::SeqCst);
work_offsets.push(AtomicUsize::new(g.offsets[i]));
}
/*Temporary synchronized edge list array*/
let mut nbs = Vec::new();
for _ in 0..el.len() {
nbs.push(AtomicUsize::new(0));
}
/*Populate the neighbor array based on the counts*/
el.par_chunks(chunksz).for_each(|cnk| {
cnk.iter().for_each(|edge| match *edge {
(v0, v1) => {
let cur_ind = work_offsets[v0].fetch_add(1, Ordering::SeqCst);
nbs[cur_ind].store(v1, Ordering::Relaxed);
}
});
});
g.neighbs
.par_chunks_mut(chunksz)
.enumerate()
.for_each(|(chunkbase, cnk)| {
cnk.iter_mut().enumerate().for_each(|(i, e)| {
*e = nbs[chunkbase + i].load(Ordering::Relaxed);
});
});
/*return the graph, g*/
g
}
/// Get the range of offsets into the neighbs array that hold the neighbors
/// of vertex v
pub fn vtx_offset_range(&self, v: usize) -> (usize, usize) {
(
self.offsets[v],
match v {
v if v == self.v - 1 => self.e,
_ => self.offsets[v + 1],
},
)
}
/// read_only_scan is a read only scan of all edges in the entire CSR
/// that accepts a FnMut(usize, usize) -> () closure to apply to each edge (source, target)
pub fn read_only_scan(&self, mut f: impl FnMut(usize, usize) -> ()) {
/*Iterate over the vertices in the offsets array*/
let len = self.offsets.len();
for i in 0..len {
/*A vertex i's offsets in neighbs array are offsets[i] to offsets[i+1]*/
let (i_start, i_end) = self.vtx_offset_range(i);
/*Traverse vertex i's neighbs and call provided f(...) on the edge*/
for ei in i_start..i_end {
let e = self.neighbs[ei];
match e {
v1 => {
f(i, v1);
}
}
}
}
}
pub fn write_fastcsr(&self, s: String) {
let path = PathBuf::from(s);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
file.set_len((self.offsets.len() + self.neighbs.len() + 2) as u64 * 8)
.unwrap();
let mmap = unsafe { MmapMut::map_mut(&file) };
let offsets_bytes = unsafe { self.offsets.align_to::<u8>().1 };
let neighbs_bytes = unsafe { self.neighbs.align_to::<u8>().1 };
mmap.unwrap().copy_from_slice(
&[
&self.offsets.len().to_le_bytes(),
&self.neighbs.len().to_le_bytes(),
offsets_bytes,
neighbs_bytes,
]
.concat(),
);
}
/// bfs_traversal starts from vertex start and does a breadth first search
/// traversal on the vertices, applying f, the closure passed in, to each
/// vertex
pub fn bfs_traversal(&self, start: usize, mut f: impl FnMut(usize) -> ()) {
let mut visited = BitVec::from_elem(self.v, false);
let mut q = Vec::new();
visited.set(start, true);
q.push(start);
while q.len() > 0 {
let v = q.remove(0);
f(v);
let (st, en) = self.vtx_offset_range(v);
for nei in st..en {
/*Get the first element of the edge, which is the distal vertex*/
let ne = self.neighbs[nei] as usize;
match visited[ne] {
false => {
visited.set(ne, true);
q.push(ne as usize);
}
_ => (),
}
}
}
}
pub fn par_scan(
&mut self,
par_level: usize,
f: impl Fn(usize, &[usize]) -> f64 + std::marker::Sync,
) -> () {
/*basically the number of threads to use*/
let chunksz: usize = if self.v > par_level {
self.v / par_level
} else {
1
};
let scan_vtx_row = |(row_i, vtx_row): (usize, &mut [f64])| {
let row_i_base: usize = row_i * chunksz;
vtx_row
.iter_mut()
.enumerate()
.for_each(|(ii, v): (usize, &mut f64)| {
let v0 = row_i_base + ii;
let (start, end) = self.vtx_offset_range(v0);
*v = f(v0, &self.neighbs[start..end]);
});
};
let mut vtxprop = vec![0.0; self.get_v()];
vtxprop
.par_chunks_mut(chunksz)
.enumerate()
.for_each(scan_vtx_row);
self.vtxprop.copy_from_slice(&vtxprop);
}
} /*impl CSR*/
| {
&mut self.vtxprop
} | identifier_body |
main.rs | #[macro_use]
extern crate clap;
extern crate ansi_term;
extern crate atty;
extern crate regex;
extern crate ignore;
extern crate num_cpus;
pub mod lscolors;
pub mod fshelper;
mod app;
use std::env;
use std::error::Error;
use std::io::Write;
use std::ops::Deref;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
use std::time;
use atty::Stream;
use regex::{Regex, RegexBuilder};
use ignore::WalkBuilder;
use lscolors::LsColors;
/// Defines how to display search result paths.
#[derive(PartialEq)]
enum PathDisplay {
/// As an absolute path
Absolute,
/// As a relative path
Relative,
}
/// The type of file to search for.
#[derive(Copy, Clone)]
enum FileType {
Any,
RegularFile,
Directory,
SymLink,
}
/// Configuration options for *fd*.
struct FdOptions {
/// Determines whether the regex search is case-sensitive or case-insensitive.
case_sensitive: bool,
/// Whether to search within the full file path or just the base name (filename or directory
/// name).
search_full_path: bool,
/// Whether to ignore hidden files and directories (or not).
ignore_hidden: bool,
/// Whether to respect VCS ignore files (`.gitignore`, `.ignore`, ..) or not.
read_ignore: bool,
/// Whether to follow symlinks or not.
follow_links: bool,
/// Whether elements of output should be separated by a null character
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
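// In `scan` below, the receiver starts in Buffering mode; once `max_buffer_time` has
// elapsed (100 ms unless overridden) it flushes the buffer and switches to Streaming.
// A search that finishes within the window is therefore printed fully sorted, while a
// long-running search streams results as they arrive.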
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) {
&ls_colors.symlink
} else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" }; |
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn scan(root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str) -> ! {
writeln!(&mut std::io::stderr(), "{}", message).expect("Failed writing to stderr");
process::exit(1);
}
fn main() {
let matches = app::build_app().get_matches();
// Get the search pattern
let empty_pattern = String::new();
let pattern = matches.value_of("pattern").unwrap_or(&empty_pattern);
// Get the current working directory
let current_dir_buf = match env::current_dir() {
Ok(cd) => cd,
Err(_) => error("Error: could not get current directory.")
};
let current_dir = current_dir_buf.as_path();
// Get the root directory for the search
let mut root_dir_is_absolute = false;
let root_dir_buf = if let Some(rd) = matches.value_of("path") {
let path = Path::new(rd);
root_dir_is_absolute = path.is_absolute();
fshelper::absolute_path(path).unwrap_or_else(
|_| error(&format!("Error: could not find directory '{}'.", rd))
)
} else {
current_dir_buf.clone()
};
if !root_dir_buf.is_dir() {
error(&format!("Error: '{}' is not a directory.", root_dir_buf.to_string_lossy()));
}
let root_dir = root_dir_buf.as_path();
// The search will be case-sensitive if the command line flag is set or
// if the pattern has an uppercase character (smart case).
let case_sensitive = matches.is_present("case-sensitive") ||
pattern.chars().any(char::is_uppercase);
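// e.g. the pattern "readme" also matches "README.md", while "Readme" forces an exact-case match.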
let colored_output = match matches.value_of("color") {
Some("always") => true,
Some("never") => false,
_ => atty::is(Stream::Stdout)
};
let ls_colors =
if colored_output {
Some(
env::var("LS_COLORS")
.ok()
.map(|val| LsColors::from_string(&val))
.unwrap_or_default()
)
} else {
None
};
let config = FdOptions {
case_sensitive: case_sensitive,
search_full_path: matches.is_present("full-path"),
ignore_hidden: !matches.is_present("hidden"),
read_ignore: !matches.is_present("no-ignore"),
follow_links: matches.is_present("follow"),
null_separator: matches.is_present("null_separator"),
max_depth: matches.value_of("depth")
.and_then(|n| usize::from_str_radix(n, 10).ok()),
threads: std::cmp::max(
matches.value_of("threads")
.and_then(|n| usize::from_str_radix(n, 10).ok())
.unwrap_or_else(num_cpus::get),
1
),
max_buffer_time: matches.value_of("max-buffer-time")
.and_then(|n| u64::from_str_radix(n, 10).ok())
.map(time::Duration::from_millis),
path_display: if matches.is_present("absolute-path") || root_dir_is_absolute {
PathDisplay::Absolute
} else {
PathDisplay::Relative
},
ls_colors: ls_colors,
file_type: match matches.value_of("file-type") {
Some("f") | Some("file") => FileType::RegularFile,
Some("d") | Some("directory") => FileType::Directory,
Some("l") | Some("symlink") => FileType::SymLink,
_ => FileType::Any,
},
extension: matches.value_of("extension")
.map(|e| e.trim_left_matches('.').to_lowercase()),
};
let root = Path::new(ROOT_DIR);
let base = match config.path_display {
PathDisplay::Relative => current_dir,
PathDisplay::Absolute => root
};
match RegexBuilder::new(pattern)
.case_insensitive(!config.case_sensitive)
.build() {
Ok(re) => scan(root_dir, Arc::new(re), base, Arc::new(config)),
Err(err) => error(err.description())
}
} | random_line_split |
|
main.rs | #[macro_use]
extern crate clap;
extern crate ansi_term;
extern crate atty;
extern crate regex;
extern crate ignore;
extern crate num_cpus;
pub mod lscolors;
pub mod fshelper;
mod app;
use std::env;
use std::error::Error;
use std::io::Write;
use std::ops::Deref;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
use std::time;
use atty::Stream;
use regex::{Regex, RegexBuilder};
use ignore::WalkBuilder;
use lscolors::LsColors;
/// Defines how to display search result paths.
#[derive(PartialEq)]
enum PathDisplay {
/// As an absolute path
Absolute,
/// As a relative path
Relative,
}
/// The type of file to search for.
#[derive(Copy, Clone)]
enum FileType {
Any,
RegularFile,
Directory,
SymLink,
}
/// Configuration options for *fd*.
struct FdOptions {
/// Determines whether the regex search is case-sensitive or case-insensitive.
case_sensitive: bool,
/// Whether to search within the full file path or just the base name (filename or directory
/// name).
search_full_path: bool,
/// Whether to ignore hidden files and directories (or not).
ignore_hidden: bool,
/// Whether to respect VCS ignore files (`.gitignore`, `.ignore`, ..) or not.
read_ignore: bool,
/// Whether to follow symlinks or not.
follow_links: bool,
/// Whether elements of output should be separated by a null character
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) | else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" };
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn scan(root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str) -> ! {
writeln!(&mut std::io::stderr(), "{}", message).expect("Failed writing to stderr");
process::exit(1);
}
fn main() {
let matches = app::build_app().get_matches();
// Get the search pattern
let empty_pattern = String::new();
let pattern = matches.value_of("pattern").unwrap_or(&empty_pattern);
// Get the current working directory
let current_dir_buf = match env::current_dir() {
Ok(cd) => cd,
Err(_) => error("Error: could not get current directory.")
};
let current_dir = current_dir_buf.as_path();
// Get the root directory for the search
let mut root_dir_is_absolute = false;
let root_dir_buf = if let Some(rd) = matches.value_of("path") {
let path = Path::new(rd);
root_dir_is_absolute = path.is_absolute();
fshelper::absolute_path(path).unwrap_or_else(
|_| error(&format!("Error: could not find directory '{}'.", rd))
)
} else {
current_dir_buf.clone()
};
if !root_dir_buf.is_dir() {
error(&format!("Error: '{}' is not a directory.", root_dir_buf.to_string_lossy()));
}
let root_dir = root_dir_buf.as_path();
// The search will be case-sensitive if the command line flag is set or
// if the pattern has an uppercase character (smart case).
let case_sensitive = matches.is_present("case-sensitive") ||
pattern.chars().any(char::is_uppercase);
let colored_output = match matches.value_of("color") {
Some("always") => true,
Some("never") => false,
_ => atty::is(Stream::Stdout)
};
let ls_colors =
if colored_output {
Some(
env::var("LS_COLORS")
.ok()
.map(|val| LsColors::from_string(&val))
.unwrap_or_default()
)
} else {
None
};
let config = FdOptions {
case_sensitive: case_sensitive,
search_full_path: matches.is_present("full-path"),
ignore_hidden: !matches.is_present("hidden"),
read_ignore: !matches.is_present("no-ignore"),
follow_links: matches.is_present("follow"),
null_separator: matches.is_present("null_separator"),
max_depth: matches.value_of("depth")
.and_then(|n| usize::from_str_radix(n, 10).ok()),
threads: std::cmp::max(
matches.value_of("threads")
.and_then(|n| usize::from_str_radix(n, 10).ok())
.unwrap_or_else(num_cpus::get),
1
),
max_buffer_time: matches.value_of("max-buffer-time")
.and_then(|n| u64::from_str_radix(n, 10).ok())
.map(time::Duration::from_millis),
path_display: if matches.is_present("absolute-path") || root_dir_is_absolute {
PathDisplay::Absolute
} else {
PathDisplay::Relative
},
ls_colors: ls_colors,
file_type: match matches.value_of("file-type") {
Some("f") | Some("file") => FileType::RegularFile,
Some("d") | Some("directory") => FileType::Directory,
Some("l") | Some("symlink") => FileType::SymLink,
_ => FileType::Any,
},
extension: matches.value_of("extension")
.map(|e| e.trim_left_matches('.').to_lowercase()),
};
let root = Path::new(ROOT_DIR);
let base = match config.path_display {
PathDisplay::Relative => current_dir,
PathDisplay::Absolute => root
};
match RegexBuilder::new(pattern)
.case_insensitive(!config.case_sensitive)
.build() {
Ok(re) => scan(root_dir, Arc::new(re), base, Arc::new(config)),
Err(err) => error(err.description())
}
}
| {
&ls_colors.symlink
} | conditional_block |
main.rs | #[macro_use]
extern crate clap;
extern crate ansi_term;
extern crate atty;
extern crate regex;
extern crate ignore;
extern crate num_cpus;
pub mod lscolors;
pub mod fshelper;
mod app;
use std::env;
use std::error::Error;
use std::io::Write;
use std::ops::Deref;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::thread;
use std::time;
use atty::Stream;
use regex::{Regex, RegexBuilder};
use ignore::WalkBuilder;
use lscolors::LsColors;
/// Defines how to display search result paths.
#[derive(PartialEq)]
enum PathDisplay {
/// As an absolute path
Absolute,
/// As a relative path
Relative,
}
/// The type of file to search for.
#[derive(Copy, Clone)]
enum FileType {
Any,
RegularFile,
Directory,
SymLink,
}
/// Configuration options for *fd*.
struct FdOptions {
/// Determines whether the regex search is case-sensitive or case-insensitive.
case_sensitive: bool,
/// Whether to search within the full file path or just the base name (filename or directory
/// name).
search_full_path: bool,
/// Whether to ignore hidden files and directories (or not).
ignore_hidden: bool,
/// Whether to respect VCS ignore files (`.gitignore`, `.ignore`, ..) or not.
read_ignore: bool,
/// Whether to follow symlinks or not.
follow_links: bool,
/// Whether elements of output should be separated by a null character
null_separator: bool,
/// The maximum search depth, or `None` if no maximum search depth should be set.
///
/// A depth of `1` includes all files under the current directory, a depth of `2` also includes
/// all files under subdirectories of the current directory, etc.
max_depth: Option<usize>,
/// The number of threads to use.
threads: usize,
/// Time to buffer results internally before streaming to the console. This is useful to
/// provide a sorted output, in case the total execution time is shorter than
/// `max_buffer_time`.
max_buffer_time: Option<time::Duration>,
/// Display results as relative or absolute path.
path_display: PathDisplay,
/// `None` if the output should not be colorized. Otherwise, a `LsColors` instance that defines
/// how to style different filetypes.
ls_colors: Option<LsColors>,
/// The type of file to search for. All files other than the specified type will be ignored.
file_type: FileType,
/// The extension to search for. Only entries matching the extension will be included.
///
/// The value (if present) will be a lowercase string without leading dots.
extension: Option<String>,
}
/// The receiver thread can either be buffering results or directly streaming to the console.
enum ReceiverMode {
/// Receiver is still buffering in order to sort the results, if the search finishes fast
/// enough.
Buffering,
/// Receiver is directly printing results to the output.
Streaming,
}
/// Root directory
#[cfg(unix)]
static ROOT_DIR: &'static str = "/";
#[cfg(windows)]
static ROOT_DIR: &'static str = "";
/// Print a search result to the console.
fn print_entry(base: &Path, entry: &PathBuf, config: &FdOptions) {
let path_full = base.join(entry);
let path_str = entry.to_string_lossy();
#[cfg(unix)]
let is_executable = |p: Option<&std::fs::Metadata>| {
p.map(|f| f.permissions().mode() & 0o111 != 0)
.unwrap_or(false)
};
#[cfg(windows)]
let is_executable = |_: Option<&std::fs::Metadata>| false;
let stdout = std::io::stdout();
let mut handle = stdout.lock();
if let Some(ref ls_colors) = config.ls_colors {
let default_style = ansi_term::Style::default();
let mut component_path = base.to_path_buf();
if config.path_display == PathDisplay::Absolute {
print!("{}", ls_colors.directory.paint(ROOT_DIR));
}
// Traverse the path and colorize each component
for component in entry.components() {
let comp_str = component.as_os_str().to_string_lossy();
component_path.push(Path::new(comp_str.deref()));
let metadata = component_path.metadata().ok();
let is_directory = metadata.as_ref().map(|md| md.is_dir()).unwrap_or(false);
let style =
if component_path.symlink_metadata()
.map(|md| md.file_type().is_symlink())
.unwrap_or(false) {
&ls_colors.symlink
} else if is_directory {
&ls_colors.directory
} else if is_executable(metadata.as_ref()) {
&ls_colors.executable
} else {
// Look up file name
let o_style =
component_path.file_name()
.and_then(|n| n.to_str())
.and_then(|n| ls_colors.filenames.get(n));
match o_style {
Some(s) => s,
None =>
// Look up file extension
component_path.extension()
.and_then(|e| e.to_str())
.and_then(|e| ls_colors.extensions.get(e))
.unwrap_or(&default_style)
}
};
write!(handle, "{}", style.paint(comp_str)).ok();
if is_directory && component_path != path_full {
let sep = std::path::MAIN_SEPARATOR.to_string();
write!(handle, "{}", style.paint(sep)).ok();
}
}
let r = if config.null_separator {
write!(handle, "\0")
} else {
writeln!(handle, "")
};
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
} else {
// Uncolorized output
let prefix = if config.path_display == PathDisplay::Absolute { ROOT_DIR } else { "" };
let separator = if config.null_separator { "\0" } else { "\n" };
let r = write!(&mut std::io::stdout(), "{}{}{}", prefix, path_str, separator);
if r.is_err() {
// Probably a broken pipe. Exit gracefully.
process::exit(0);
}
}
}
/// Recursively scan the given search path and search for files / pathnames matching the pattern.
fn | (root: &Path, pattern: Arc<Regex>, base: &Path, config: Arc<FdOptions>) {
let (tx, rx) = channel();
let walker = WalkBuilder::new(root)
.hidden(config.ignore_hidden)
.ignore(config.read_ignore)
.git_ignore(config.read_ignore)
.parents(config.read_ignore)
.git_global(config.read_ignore)
.git_exclude(config.read_ignore)
.follow_links(config.follow_links)
.max_depth(config.max_depth)
.threads(config.threads)
.build_parallel();
// Spawn the thread that receives all results through the channel.
let rx_config = Arc::clone(&config);
let rx_base = base.to_owned();
let receiver_thread = thread::spawn(move || {
let start = time::Instant::now();
let mut buffer = vec![];
// Start in buffering mode
let mut mode = ReceiverMode::Buffering;
// Maximum time to wait before we start streaming to the console.
let max_buffer_time = rx_config.max_buffer_time
.unwrap_or_else(|| time::Duration::from_millis(100));
for value in rx {
match mode {
ReceiverMode::Buffering => {
buffer.push(value);
// Have we reached the maximum time?
if time::Instant::now() - start > max_buffer_time {
// Flush the buffer
for v in &buffer {
print_entry(&rx_base, v, &rx_config);
}
buffer.clear();
// Start streaming
mode = ReceiverMode::Streaming;
}
},
ReceiverMode::Streaming => {
print_entry(&rx_base, &value, &rx_config);
}
}
}
// If we have finished fast enough (faster than max_buffer_time), we haven't streamed
// anything to the console, yet. In this case, sort the results and print them:
if !buffer.is_empty() {
buffer.sort();
for value in buffer {
print_entry(&rx_base, &value, &rx_config);
}
}
});
// Spawn the sender threads.
walker.run(|| {
let base = base.to_owned();
let config = Arc::clone(&config);
let pattern = Arc::clone(&pattern);
let tx_thread = tx.clone();
Box::new(move |entry_o| {
let entry = match entry_o {
Ok(e) => e,
Err(_) => return ignore::WalkState::Continue,
};
// Filter out unwanted file types.
match config.file_type {
FileType::Any => (),
FileType::RegularFile => if entry.file_type().map_or(false, |ft| !ft.is_file()) {
return ignore::WalkState::Continue;
},
FileType::Directory => if entry.file_type().map_or(false, |ft| !ft.is_dir()) {
return ignore::WalkState::Continue;
},
FileType::SymLink => if entry.file_type().map_or(false, |ft| !ft.is_symlink()) {
return ignore::WalkState::Continue;
},
}
// Filter out unwanted extensions.
if let Some(ref filter_ext) = config.extension {
let entry_ext = entry.path().extension().map(|e| e.to_string_lossy().to_lowercase());
if entry_ext.map_or(true, |ext| ext != *filter_ext) {
return ignore::WalkState::Continue;
}
}
let path_rel_buf = match fshelper::path_relative_from(entry.path(), &*base) {
Some(p) => p,
None => error("Error: could not get relative path for directory entry.")
};
let path_rel = path_rel_buf.as_path();
let search_str_o =
if config.search_full_path {
Some(path_rel.to_string_lossy())
} else {
path_rel.file_name()
.map(|f| f.to_string_lossy())
};
if let Some(search_str) = search_str_o {
// TODO: take care of the unwrap call
pattern.find(&*search_str)
.map(|_| tx_thread.send(path_rel_buf.to_owned()).unwrap());
}
ignore::WalkState::Continue
})
});
// Drop the initial sender. If we don't do this, the receiver will block even
// if all threads have finished, since there is still one sender around.
drop(tx);
// Wait for the receiver thread to print out all results.
receiver_thread.join().unwrap();
}
/// Print error message to stderr and exit with status `1`.
fn error(message: &str) -> ! {
writeln!(&mut std::io::stderr(), "{}", message).expect("Failed writing to stderr");
process::exit(1);
}
fn main() {
let matches = app::build_app().get_matches();
// Get the search pattern
let empty_pattern = String::new();
let pattern = matches.value_of("pattern").unwrap_or(&empty_pattern);
// Get the current working directory
let current_dir_buf = match env::current_dir() {
Ok(cd) => cd,
Err(_) => error("Error: could not get current directory.")
};
let current_dir = current_dir_buf.as_path();
// Get the root directory for the search
let mut root_dir_is_absolute = false;
let root_dir_buf = if let Some(rd) = matches.value_of("path") {
let path = Path::new(rd);
root_dir_is_absolute = path.is_absolute();
fshelper::absolute_path(path).unwrap_or_else(
|_| error(&format!("Error: could not find directory '{}'.", rd))
)
} else {
current_dir_buf.clone()
};
if !root_dir_buf.is_dir() {
error(&format!("Error: '{}' is not a directory.", root_dir_buf.to_string_lossy()));
}
let root_dir = root_dir_buf.as_path();
// The search will be case-sensitive if the command line flag is set or
// if the pattern has an uppercase character (smart case).
let case_sensitive = matches.is_present("case-sensitive") ||
pattern.chars().any(char::is_uppercase);
let colored_output = match matches.value_of("color") {
Some("always") => true,
Some("never") => false,
_ => atty::is(Stream::Stdout)
};
let ls_colors =
if colored_output {
Some(
env::var("LS_COLORS")
.ok()
.map(|val| LsColors::from_string(&val))
.unwrap_or_default()
)
} else {
None
};
let config = FdOptions {
case_sensitive: case_sensitive,
search_full_path: matches.is_present("full-path"),
ignore_hidden: !matches.is_present("hidden"),
read_ignore: !matches.is_present("no-ignore"),
follow_links: matches.is_present("follow"),
null_separator: matches.is_present("null_separator"),
max_depth: matches.value_of("depth")
.and_then(|n| usize::from_str_radix(n, 10).ok()),
threads: std::cmp::max(
matches.value_of("threads")
.and_then(|n| usize::from_str_radix(n, 10).ok())
.unwrap_or_else(num_cpus::get),
1
),
max_buffer_time: matches.value_of("max-buffer-time")
.and_then(|n| u64::from_str_radix(n, 10).ok())
.map(time::Duration::from_millis),
path_display: if matches.is_present("absolute-path") || root_dir_is_absolute {
PathDisplay::Absolute
} else {
PathDisplay::Relative
},
ls_colors: ls_colors,
file_type: match matches.value_of("file-type") {
Some("f") | Some("file") => FileType::RegularFile,
Some("d") | Some("directory") => FileType::Directory,
Some("l") | Some("symlink") => FileType::SymLink,
_ => FileType::Any,
},
extension: matches.value_of("extension")
.map(|e| e.trim_left_matches('.').to_lowercase()),
};
let root = Path::new(ROOT_DIR);
let base = match config.path_display {
PathDisplay::Relative => current_dir,
PathDisplay::Absolute => root
};
match RegexBuilder::new(pattern)
.case_insensitive(!config.case_sensitive)
.build() {
Ok(re) => scan(root_dir, Arc::new(re), base, Arc::new(config)),
Err(err) => error(err.description())
}
}
| scan | identifier_name |
doc_upsert.rs | //! The `doc upsert` command performs a KV upsert operation.
use super::util::convert_nu_value_to_json_value;
use crate::cli::error::{client_error_to_shell_error, serialize_error};
use crate::cli::util::{
cluster_identifiers_from, get_active_cluster, namespace_from_args, NuValueMap,
};
use crate::client::{ClientError, KeyValueRequest, KvClient, KvResponse};
use crate::state::State;
use crate::RemoteCluster;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
use std::collections::HashSet;
use std::future::Future;
use std::ops::Add;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, MutexGuard};
use tokio::runtime::Runtime;
use tokio::time::Instant;
#[derive(Clone)]
pub struct DocUpsert {
state: Arc<Mutex<State>>,
}
impl DocUpsert {
pub fn new(state: Arc<Mutex<State>>) -> Self {
Self { state }
}
}
impl Command for DocUpsert {
fn name(&self) -> &str {
"doc upsert"
}
fn signature(&self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
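    // Illustrative only (not taken from the project docs): given the signature above, an
    // invocation could look like
    //   doc upsert my-doc-id {foo: "bar"} --bucket travel-sample --expiry 300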
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
} | }
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate) fn build_batched_kv_items<T>(
batch_size: u32,
items: impl IntoIterator<Item = T>,
) -> Vec<Vec<T>> {
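    // Note on the batching below: the counter is compared *after* each push, so a flushed
    // batch holds `batch_size + 1` items, and a trailing empty batch can be pushed when the
    // item count lines up exactly with a flush.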
let mut all_items = vec![];
let mut these_items = vec![];
let mut i = 0;
for item in items.into_iter() {
these_items.push(item);
if i == batch_size {
all_items.push(these_items);
these_items = vec![];
i = 0;
continue;
}
i += 1;
}
all_items.push(these_items);
all_items
}
pub(crate) fn get_active_cluster_client_cid<'a>(
rt: &Runtime,
cluster: String,
guard: &'a MutexGuard<State>,
bucket: Option<String>,
scope: Option<String>,
collection: Option<String>,
ctrl_c: Arc<AtomicBool>,
span: Span,
) -> Result<(&'a RemoteCluster, Arc<KvClient>, u32), ShellError> {
let active_cluster = get_active_cluster(cluster, &guard, span)?;
let (bucket, scope, collection) =
namespace_from_args(bucket, scope, collection, active_cluster, span)?;
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let client = rt
.block_on(active_cluster.cluster().key_value_client(
bucket.clone(),
deadline,
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
let cid = rt
.block_on(client.get_cid(
scope,
collection,
Instant::now().add(active_cluster.timeouts().data_timeout()),
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
Ok((active_cluster, Arc::new(client), cid))
}
#[derive(Debug)]
pub struct MutationResult {
success: i32,
failed: i32,
fail_reasons: HashSet<String>,
cluster: String,
}
impl MutationResult {
pub fn new(cluster: String) -> Self {
Self {
success: 0,
failed: 0,
fail_reasons: Default::default(),
cluster,
}
}
pub fn success(mut self, success: i32) -> Self {
self.success = success;
self
}
pub fn failed(mut self, failed: i32) -> Self {
self.failed = failed;
self
}
pub fn fail_reasons(mut self, fail_reasons: HashSet<String>) -> Self {
self.fail_reasons = fail_reasons;
self
}
pub fn into_value(self, span: Span) -> Value {
let mut collected = NuValueMap::default();
collected.add_i64("processed", (self.success + self.failed) as i64, span);
collected.add_i64("success", self.success as i64, span);
collected.add_i64("failed", self.failed as i64, span);
let reasons = self
.fail_reasons
.into_iter()
.collect::<Vec<String>>()
.join(", ");
collected.add_string("failures", reasons, span);
collected.add_string("cluster", self.cluster, span);
collected.into_value(span)
}
} | random_line_split |
|
doc_upsert.rs | //! The `doc upsert` command performs a KV upsert operation.
use super::util::convert_nu_value_to_json_value;
use crate::cli::error::{client_error_to_shell_error, serialize_error};
use crate::cli::util::{
cluster_identifiers_from, get_active_cluster, namespace_from_args, NuValueMap,
};
use crate::client::{ClientError, KeyValueRequest, KvClient, KvResponse};
use crate::state::State;
use crate::RemoteCluster;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
use std::collections::HashSet;
use std::future::Future;
use std::ops::Add;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, MutexGuard};
use tokio::runtime::Runtime;
use tokio::time::Instant;
#[derive(Clone)]
pub struct DocUpsert {
state: Arc<Mutex<State>>,
}
impl DocUpsert {
pub fn new(state: Arc<Mutex<State>>) -> Self {
Self { state }
}
}
impl Command for DocUpsert {
fn name(&self) -> &str {
"doc upsert"
}
fn signature(&self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
fn | (
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate) fn build_batched_kv_items<T>(
batch_size: u32,
items: impl IntoIterator<Item = T>,
) -> Vec<Vec<T>> {
let mut all_items = vec![];
let mut these_items = vec![];
let mut i = 0;
for item in items.into_iter() {
these_items.push(item);
if i == batch_size {
all_items.push(these_items);
these_items = vec![];
i = 0;
continue;
}
i += 1;
}
all_items.push(these_items);
all_items
}
pub(crate) fn get_active_cluster_client_cid<'a>(
rt: &Runtime,
cluster: String,
guard: &'a MutexGuard<State>,
bucket: Option<String>,
scope: Option<String>,
collection: Option<String>,
ctrl_c: Arc<AtomicBool>,
span: Span,
) -> Result<(&'a RemoteCluster, Arc<KvClient>, u32), ShellError> {
let active_cluster = get_active_cluster(cluster, &guard, span)?;
let (bucket, scope, collection) =
namespace_from_args(bucket, scope, collection, active_cluster, span)?;
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let client = rt
.block_on(active_cluster.cluster().key_value_client(
bucket.clone(),
deadline,
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
let cid = rt
.block_on(client.get_cid(
scope,
collection,
Instant::now().add(active_cluster.timeouts().data_timeout()),
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
Ok((active_cluster, Arc::new(client), cid))
}
#[derive(Debug)]
pub struct MutationResult {
success: i32,
failed: i32,
fail_reasons: HashSet<String>,
cluster: String,
}
impl MutationResult {
pub fn new(cluster: String) -> Self {
Self {
success: 0,
failed: 0,
fail_reasons: Default::default(),
cluster,
}
}
pub fn success(mut self, success: i32) -> Self {
self.success = success;
self
}
pub fn failed(mut self, failed: i32) -> Self {
self.failed = failed;
self
}
pub fn fail_reasons(mut self, fail_reasons: HashSet<String>) -> Self {
self.fail_reasons = fail_reasons;
self
}
pub fn into_value(self, span: Span) -> Value {
let mut collected = NuValueMap::default();
collected.add_i64("processed", (self.success + self.failed) as i64, span);
collected.add_i64("success", self.success as i64, span);
collected.add_i64("failed", self.failed as i64, span);
let reasons = self
.fail_reasons
.into_iter()
.collect::<Vec<String>>()
.join(", ");
collected.add_string("failures", reasons, span);
collected.add_string("cluster", self.cluster, span);
collected.into_value(span)
}
}
| run_upsert | identifier_name |
doc_upsert.rs | //! The `doc upsert` command performs a KV upsert operation.
use super::util::convert_nu_value_to_json_value;
use crate::cli::error::{client_error_to_shell_error, serialize_error};
use crate::cli::util::{
cluster_identifiers_from, get_active_cluster, namespace_from_args, NuValueMap,
};
use crate::client::{ClientError, KeyValueRequest, KvClient, KvResponse};
use crate::state::State;
use crate::RemoteCluster;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
use std::collections::HashSet;
use std::future::Future;
use std::ops::Add;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, MutexGuard};
use tokio::runtime::Runtime;
use tokio::time::Instant;
#[derive(Clone)]
pub struct DocUpsert {
state: Arc<Mutex<State>>,
}
impl DocUpsert {
pub fn new(state: Arc<Mutex<State>>) -> Self {
Self { state }
}
}
impl Command for DocUpsert {
fn name(&self) -> &str {
"doc upsert"
}
fn signature(&self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest |
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column {
id = v.as_string().ok();
}
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate) fn build_batched_kv_items<T>(
batch_size: u32,
items: impl IntoIterator<Item = T>,
) -> Vec<Vec<T>> {
let mut all_items = vec![];
let mut these_items = vec![];
let mut i = 0;
for item in items.into_iter() {
these_items.push(item);
if i == batch_size {
all_items.push(these_items);
these_items = vec![];
i = 0;
continue;
}
i += 1;
}
all_items.push(these_items);
all_items
}
pub(crate) fn get_active_cluster_client_cid<'a>(
rt: &Runtime,
cluster: String,
guard: &'a MutexGuard<State>,
bucket: Option<String>,
scope: Option<String>,
collection: Option<String>,
ctrl_c: Arc<AtomicBool>,
span: Span,
) -> Result<(&'a RemoteCluster, Arc<KvClient>, u32), ShellError> {
let active_cluster = get_active_cluster(cluster, &guard, span)?;
let (bucket, scope, collection) =
namespace_from_args(bucket, scope, collection, active_cluster, span)?;
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let client = rt
.block_on(active_cluster.cluster().key_value_client(
bucket.clone(),
deadline,
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
let cid = rt
.block_on(client.get_cid(
scope,
collection,
Instant::now().add(active_cluster.timeouts().data_timeout()),
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
Ok((active_cluster, Arc::new(client), cid))
}
#[derive(Debug)]
pub struct MutationResult {
success: i32,
failed: i32,
fail_reasons: HashSet<String>,
cluster: String,
}
impl MutationResult {
pub fn new(cluster: String) -> Self {
Self {
success: 0,
failed: 0,
fail_reasons: Default::default(),
cluster,
}
}
pub fn success(mut self, success: i32) -> Self {
self.success = success;
self
}
pub fn failed(mut self, failed: i32) -> Self {
self.failed = failed;
self
}
pub fn fail_reasons(mut self, fail_reasons: HashSet<String>) -> Self {
self.fail_reasons = fail_reasons;
self
}
pub fn into_value(self, span: Span) -> Value {
let mut collected = NuValueMap::default();
collected.add_i64("processed", (self.success + self.failed) as i64, span);
collected.add_i64("success", self.success as i64, span);
collected.add_i64("failed", self.failed as i64, span);
let reasons = self
.fail_reasons
.into_iter()
.collect::<Vec<String>>()
.join(", ");
collected.add_string("failures", reasons, span);
collected.add_string("cluster", self.cluster, span);
collected.into_value(span)
}
}
| {
KeyValueRequest::Set { key, value, expiry }
} | identifier_body |
doc_upsert.rs | //! The `doc upsert` command performs a KV upsert operation.
use super::util::convert_nu_value_to_json_value;
use crate::cli::error::{client_error_to_shell_error, serialize_error};
use crate::cli::util::{
cluster_identifiers_from, get_active_cluster, namespace_from_args, NuValueMap,
};
use crate::client::{ClientError, KeyValueRequest, KvClient, KvResponse};
use crate::state::State;
use crate::RemoteCluster;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
use std::collections::HashSet;
use std::future::Future;
use std::ops::Add;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex, MutexGuard};
use tokio::runtime::Runtime;
use tokio::time::Instant;
#[derive(Clone)]
pub struct DocUpsert {
state: Arc<Mutex<State>>,
}
impl DocUpsert {
pub fn new(state: Arc<Mutex<State>>) -> Self {
Self { state }
}
}
impl Command for DocUpsert {
fn name(&self) -> &str {
"doc upsert"
}
fn signature(&self) -> Signature {
Signature::build("doc upsert")
.optional("id", SyntaxShape::String, "the document id")
.optional("content", SyntaxShape::Any, "the document content")
.named(
"id-column",
SyntaxShape::String,
"the name of the id column if used with an input stream",
None,
)
.named(
"bucket",
SyntaxShape::String,
"the name of the bucket",
None,
)
.named(
"content-column",
SyntaxShape::String,
"the name of the content column if used with an input stream",
None,
)
.named(
"expiry",
SyntaxShape::Number,
"the expiry for the documents in seconds, or absolute",
None,
)
.named("scope", SyntaxShape::String, "the name of the scope", None)
.named(
"collection",
SyntaxShape::String,
"the name of the collection",
None,
)
.named(
"databases",
SyntaxShape::String,
"the databases which should be contacted",
None,
)
.named(
"batch-size",
SyntaxShape::Number,
"the maximum number of items to batch send at a time",
None,
)
.switch("halt-on-error", "halt on any errors", Some('e'))
.category(Category::Custom("couchbase".to_string()))
}
fn usage(&self) -> &str {
"Upsert (insert or override) a document through the data service"
}
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
run_upsert(self.state.clone(), engine_state, stack, call, input)
}
}
fn build_req(key: String, value: Vec<u8>, expiry: u32) -> KeyValueRequest {
KeyValueRequest::Set { key, value, expiry }
}
fn run_upsert(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<PipelineData, ShellError> {
let results = run_kv_store_ops(state, engine_state, stack, call, input, build_req)?;
Ok(Value::List {
vals: results,
span: call.head,
}
.into_pipeline_data())
}
pub(crate) fn run_kv_store_ops(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let span = call.head;
let id_column = call
.get_flag(engine_state, stack, "id-column")?
.unwrap_or_else(|| String::from("id"));
let content_column = call
.get_flag(engine_state, stack, "content-column")?
.unwrap_or_else(|| String::from("content"));
let input_args = if let Some(id) = call.opt::<String>(engine_state, stack, 0)? {
if let Some(v) = call.opt::<Value>(engine_state, stack, 1)? {
let content = convert_nu_value_to_json_value(&v, span)?;
vec![(id, content)]
} else {
vec![]
}
} else {
vec![]
};
let filtered = input.into_iter().filter_map(move |i| {
let id_column = id_column.clone();
let content_column = content_column.clone();
if let Value::Record { cols, vals, .. } = i {
let mut id = None;
let mut content = None;
for (k, v) in cols.iter().zip(vals) {
if k.clone() == id_column |
if k.clone() == content_column {
content = convert_nu_value_to_json_value(&v, span).ok();
}
}
if let Some(i) = id {
if let Some(c) = content {
return Some((i, c));
}
}
}
None
});
let mut all_items = vec![];
for item in filtered.chain(input_args) {
let value =
serde_json::to_vec(&item.1).map_err(|e| serialize_error(e.to_string(), span))?;
all_items.push((item.0, value));
}
run_kv_mutations(
state,
engine_state,
stack,
call,
span,
all_items,
req_builder,
)
}
pub fn run_kv_mutations(
state: Arc<Mutex<State>>,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
span: Span,
all_items: Vec<(String, Vec<u8>)>,
req_builder: fn(String, Vec<u8>, u32) -> KeyValueRequest,
) -> Result<Vec<Value>, ShellError> {
let ctrl_c = engine_state.ctrlc.as_ref().unwrap().clone();
let expiry: i64 = call.get_flag(engine_state, stack, "expiry")?.unwrap_or(0);
let batch_size: Option<i64> = call.get_flag(engine_state, stack, "batch-size")?;
let bucket_flag = call.get_flag(engine_state, stack, "bucket")?;
let scope_flag = call.get_flag(engine_state, stack, "scope")?;
let collection_flag = call.get_flag(engine_state, stack, "collection")?;
let halt_on_error = call.has_flag("halt-on-error");
let cluster_identifiers = cluster_identifiers_from(engine_state, stack, &state, call, true)?;
let guard = state.lock().unwrap();
let mut all_values = vec![];
if let Some(size) = batch_size {
all_values = build_batched_kv_items(size as u32, all_items.clone());
}
let mut results = vec![];
for identifier in cluster_identifiers {
let rt = Runtime::new().unwrap();
let (active_cluster, client, cid) = match get_active_cluster_client_cid(
&rt,
identifier.clone(),
&guard,
bucket_flag.clone(),
scope_flag.clone(),
collection_flag.clone(),
ctrl_c.clone(),
span,
) {
Ok(c) => c,
Err(e) => {
if halt_on_error {
return Err(e);
}
let mut failures = HashSet::new();
failures.insert(e.to_string());
let collected = MutationResult::new(identifier.clone())
.fail_reasons(failures)
.into_value(call.head);
results.push(collected);
continue;
}
};
if all_values.is_empty() {
all_values = build_batched_kv_items(active_cluster.kv_batch_size(), all_items.clone());
}
let mut workers = FuturesUnordered::new();
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
for items in all_values.clone() {
for item in items.clone() {
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let ctrl_c = ctrl_c.clone();
let client = client.clone();
workers.push(async move {
client
.request(
req_builder(item.0, item.1, expiry as u32),
cid,
deadline,
ctrl_c,
)
.await
});
}
// process_kv_workers will handle creating an error for us if halt_on_error is set so we
// can just bubble it.
let worked = process_kv_workers(workers, &rt, halt_on_error, span)?;
success += worked.success;
failed += worked.failed;
fail_reasons.extend(worked.fail_reasons);
workers = FuturesUnordered::new()
}
let collected = MutationResult::new(identifier.clone())
.success(success)
.failed(failed)
.fail_reasons(fail_reasons);
results.push(collected.into_value(span));
}
Ok(results)
}
pub(crate) struct WorkerResponse {
pub(crate) success: i32,
pub(crate) failed: i32,
pub(crate) fail_reasons: HashSet<String>,
}
pub(crate) fn process_kv_workers(
mut workers: FuturesUnordered<impl Future<Output = Result<KvResponse, ClientError>>>,
rt: &Runtime,
halt_on_error: bool,
span: Span,
) -> Result<WorkerResponse, ShellError> {
let (success, failed, fail_reasons) = rt.block_on(async {
let mut success = 0;
let mut failed = 0;
let mut fail_reasons: HashSet<String> = HashSet::new();
while let Some(result) = workers.next().await {
match result {
Ok(_) => success += 1,
Err(e) => {
if halt_on_error {
return Err(client_error_to_shell_error(e, span));
}
failed += 1;
fail_reasons.insert(e.to_string());
}
}
}
Ok((success, failed, fail_reasons))
})?;
Ok(WorkerResponse {
success,
failed,
fail_reasons,
})
}
pub(crate) fn build_batched_kv_items<T>(
batch_size: u32,
items: impl IntoIterator<Item = T>,
) -> Vec<Vec<T>> {
let mut all_items = vec![];
let mut these_items = vec![];
let mut i = 0;
for item in items.into_iter() {
these_items.push(item);
if i == batch_size {
all_items.push(these_items);
these_items = vec![];
i = 0;
continue;
}
i += 1;
}
all_items.push(these_items);
all_items
}
pub(crate) fn get_active_cluster_client_cid<'a>(
rt: &Runtime,
cluster: String,
guard: &'a MutexGuard<State>,
bucket: Option<String>,
scope: Option<String>,
collection: Option<String>,
ctrl_c: Arc<AtomicBool>,
span: Span,
) -> Result<(&'a RemoteCluster, Arc<KvClient>, u32), ShellError> {
let active_cluster = get_active_cluster(cluster, &guard, span)?;
let (bucket, scope, collection) =
namespace_from_args(bucket, scope, collection, active_cluster, span)?;
let deadline = Instant::now().add(active_cluster.timeouts().data_timeout());
let client = rt
.block_on(active_cluster.cluster().key_value_client(
bucket.clone(),
deadline,
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
let cid = rt
.block_on(client.get_cid(
scope,
collection,
Instant::now().add(active_cluster.timeouts().data_timeout()),
ctrl_c.clone(),
))
.map_err(|e| client_error_to_shell_error(e, span))?;
Ok((active_cluster, Arc::new(client), cid))
}
#[derive(Debug)]
pub struct MutationResult {
success: i32,
failed: i32,
fail_reasons: HashSet<String>,
cluster: String,
}
impl MutationResult {
pub fn new(cluster: String) -> Self {
Self {
success: 0,
failed: 0,
fail_reasons: Default::default(),
cluster,
}
}
pub fn success(mut self, success: i32) -> Self {
self.success = success;
self
}
pub fn failed(mut self, failed: i32) -> Self {
self.failed = failed;
self
}
pub fn fail_reasons(mut self, fail_reasons: HashSet<String>) -> Self {
self.fail_reasons = fail_reasons;
self
}
pub fn into_value(self, span: Span) -> Value {
let mut collected = NuValueMap::default();
collected.add_i64("processed", (self.success + self.failed) as i64, span);
collected.add_i64("success", self.success as i64, span);
collected.add_i64("failed", self.failed as i64, span);
let reasons = self
.fail_reasons
.into_iter()
.collect::<Vec<String>>()
.join(", ");
collected.add_string("failures", reasons, span);
collected.add_string("cluster", self.cluster, span);
collected.into_value(span)
}
}
| {
id = v.as_string().ok();
} | conditional_block |
pipeline.fromIlastik.py | #!/usr/bin/env python
####################################################################################################
# Load the necessary libraries
###################################################################################################
import networkx as nx
import numpy as np
from scipy import sparse, linalg
from sklearn.preprocessing import normalize
from graviti import * # the local module
import sys, getopt
import os
import copy
import seaborn as sns; sns.set()
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
####################################################################################################
# Define the input parameters
####################################################################################################
try:
opts, args = getopt.getopt(sys.argv[1:], "i:s:n:a:p:e:c:m:t:",
["input","seed=","nn=","area=","perimeter=",
"eccentricity=","circularity=","meanIntensity=","totalIntensity=","pos="])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
print(opts)
usecols = ()
for o, a in opts:
if o in ("-i", "--input"):
filename = a # morphology measurements file
elif o in ("-s", "--seed"):
rndsample = int(a) # Leiden seed
elif o in ("-n", "--nn"):
        nn = int(a) # number of nearest neighbors for the graph construction
elif o in ("-a", "--area"):
if a == '1': usecols = usecols+(3,)
elif o in ("-p", "--perimeter"):
if a == '1': usecols = usecols+(4,)
elif o in ("-e", "--eccentricity"):
if a == '1': usecols = usecols+(5,)
elif o in ("-c", "--circularity"):
if a == '1': usecols = usecols+(6,)
elif o in ("-m", "--meanIntensity"):
if a == '1': usecols = usecols+(7,)
elif o in ("-t", "--totalIntensity"):
if a == '1': usecols = usecols+(8,)
elif o in ("--pos"):
if a == '1':
position = True
else:
position = False
else:
assert False, "unhandled option"
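# Illustrative invocation (file name and values are made up; flags as parsed above):
#   python pipeline.fromIlastik.py -i measurements.txt.gz -s 0 -n 10 -a 1 -p 1 -e 1 -c 1 -m 0 -t 0 --pos 0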
####################################################################################################
# Define basic filenames
# !!! have the base name dependent on the parameters !!!
####################################################################################################
basename_graph = os.path.splitext(os.path.basename(filename))[0]
if os.path.splitext(os.path.basename(filename))[1] == '.gz':
    basename = os.path.splitext(os.path.splitext(os.path.basename(filename))[0])[0]+'.s'+str(rndsample)
else:
    basename = basename_graph+'.s'+str(rndsample) # fall back for inputs that are not gzipped
dirname = os.path.dirname(filename)
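# Worked example of the naming scheme (illustrative path): with -i /data/sample.txt.gz and -s 0,
# basename_graph becomes 'sample.txt', basename becomes 'sample.s0' and dirname '/data'.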
####################################################################################################
# Construct the UMAP graph
# and save the adjacency matrix
# and the degree and clustering coefficient vectors
###################################################################################################
print('Prepare the topological graph ...')
path = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.adj.npz'
if os.path.exists(path) and os.path.exists( os.path.join(dirname, basename_graph) + ".graph.pickle" ):
print('The graph exists already and I am now loading it...')
A = sparse.load_npz(path)
    pos = np.loadtxt(filename, delimiter="\t", skiprows=1, usecols=(1,2)) # choose x and y and skip the header
G = nx.read_gpickle(os.path.join(dirname, basename_graph) + ".graph.pickle")
d = getdegree(G)
cc = clusteringCoeff(A)
else:
    print('The graph does not exist yet and I am going to create one...')
pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2))
A = space2graph(pos,nn) # create the topology graph
sparse.save_npz(path, A)
G = nx.from_scipy_sparse_matrix(A, edge_attribute='weight')
d = getdegree(G)
cc = clusteringCoeff(A)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.degree.gz'
np.savetxt(outfile, d)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.cc.gz'
np.savetxt(outfile, cc)
nx.write_gpickle(G, os.path.join(dirname, basename_graph) + ".graph.pickle")
pos2norm = np.linalg.norm(pos,axis=1).reshape((pos.shape[0],1)) # the length of the position vector |
print('Topological graph ready!')
print('...the graph has '+str(A.shape[0])+' nodes')
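# NOTE (illustrative assumption): `space2graph`, `getdegree` and `clusteringCoeff`
# come from the local `graviti` module and are not shown in this file. The comment
# above mentions a UMAP-style graph; the stand-in below uses a plain weighted
# k-nearest-neighbour adjacency instead, so it is only a sketch of the idea and
# not the actual implementation.
def _space2graph_sketch(positions, k):
    from sklearn.neighbors import kneighbors_graph
    adj = kneighbors_graph(positions, k, mode='distance', include_self=False)
    adj.data = 1.0/(1.0 + adj.data)  # turn distances into similarity-like weights
    return 0.5*(adj + adj.T)         # symmetrize so the graph is undirected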
####################################################################################################
# Select the morphological features,
# and set the min number of nodes per subgraph
# Features list:
# fov_name x_centroid y_centroid area perimeter eccentricity circularity mean_intensity total_intensity
# !!!optimize the threshold!!!
###################################################################################################
print('Prepare the morphology array')
print('...the features tuple that we consider is: ',str(usecols))
morphology = np.loadtxt(filename, delimiter="\t", skiprows=True, usecols=usecols).reshape((A.shape[0],len(usecols)))
threshold = (morphology.shape[1]+4)*2 # set the min subgraph size based on the dim of the feature matrix
morphologies = morphology.shape[1] # number of morphological features
####################################################################################################
# Weight the graph taking into account topology and morphology
# the new weight is the ratio topological_similarity/(1+morpho_dissimilarity)
# !!!need to be optimized!!!
###################################################################################################
print('Rescale graph weights by local morphology')
morphology_normed = normalize(morphology, norm='l1', axis=0) # normalize features
GG = copy.deepcopy(G) # create a topology+morphology new graph
for ijw in G.edges(data='weight'): # loop over edges
feature = np.asarray([ abs(morphology_normed[ijw[0],f]-morphology_normed[ijw[1],f]) for f in range(morphologies) ])
GG[ijw[0]][ijw[1]]['weight'] = ijw[2]/(1.0+np.sum(feature))
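# Illustrative sanity check (added for exposition, not part of the original pipeline):
# for any edge (i, j) the rescaled weight should equal w_ij / (1 + sum_f |m_if - m_jf|),
# where m is the L1-normalized morphology matrix used above.
_i, _j, _w = next(iter(G.edges(data='weight')))               # pick an arbitrary edge
_dissim = np.sum(np.abs(morphology_normed[_i] - morphology_normed[_j]))
assert np.isclose(GG[_i][_j]['weight'], _w/(1.0 + _dissim))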
####################################################################################################
# Community detection in the topology+morphology graph
# !!! find a way to avoid writing the edge list on disk !!!
###################################################################################################
print('Find the communities in GG')
from cdlib import algorithms
from cdlib import evaluation
from cdlib.utils import convert_graph_formats
import igraph
import leidenalg
from networkx.algorithms.community.quality import modularity
print('...generate connected components as subgraphs...')
graphs = list(nx.connected_component_subgraphs(GG)) # list the connected components
print('...convert networkx graph to igraph object...')
communities = []
for graph in graphs:
nx.write_weighted_edgelist(graph, basename+".edgelist.txt") # write the edge list on disk
g = igraph.Graph.Read_Ncol(basename+".edgelist.txt", names=True, weights="if_present", directed=False) # define the igraph obj
os.remove(basename+".edgelist.txt") # delete the edge list
part = leidenalg.find_partition(g,
leidenalg.ModularityVertexPartition,
initial_membership=None,
weights='weight',
seed=rndsample,
n_iterations=2) # find partitions
communities.extend([g.vs[x]['name'] for x in part]) # create a list of communities
bigcommunities = [g for g in communities if len(g) > threshold] # list of big enough communities
outfile = os.path.join(dirname, basename)+'.bigcommunities'
np.save(outfile, bigcommunities) # store the big communities
print('There are '+str(len(bigcommunities))+' big communities and '+str(len(communities))+' communities in total')
####################################################################################################
# Generate the covariance descriptors of the topology graph
# !!! insert a switch for the position !!!
###################################################################################################
print('Generate the covariance descriptor')
if position:
print('...the position information is included')
features = np.hstack((pos2norm,morphology)) # this is rotational invariant
else:
print('...the position information is not included')
features = morphology # this is without positions
outfile_covd = os.path.join(dirname, basename)+'.covd.npy' # name of the covd file
if os.path.exists(outfile_covd):
print('... loading the descriptors ...')
covdata = np.load(outfile_covd,allow_pickle=True) # load covd data
else:
print('... creating the descriptors ...')
covdata = community_covd(features,G,bigcommunities) # get list of cov matrices and a list of nodes per matrix
np.save(outfile_covd,covdata) # store covd data
print('There are '+str(len(covdata))+' covariance descriptors ')
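# NOTE (illustrative assumption): `community_covd` is defined in the local `graviti`
# module and its code is not shown here. One plausible reading -- a covariance matrix
# of the node feature rows for every community -- is sketched below purely for
# exposition; it is not necessarily the actual implementation.
def _community_covd_sketch(feat, graph, communities):
    descriptors = []
    for community in communities:
        nodes = [int(n) for n in community]                       # igraph vertex names back to node ids
        descriptors.append(np.cov(feat[nodes, :], rowvar=False))  # one FxF covariance per community
    return descriptors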
####################################################################################################
# Cluster the covariance descriptors
###################################################################################################
print('Clustering the descriptors')
import umap
import hdbscan
import sklearn.cluster as cluster
from sklearn.cluster import OPTICS
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
print('...prepare the data...')
outfile_logvec = os.path.join(dirname, basename)+'.logvec.npy'
if os.path.exists(outfile_logvec):
print('...load the logvec dataset...')
X = np.load(outfile_logvec,allow_pickle=True)
else:
print('...create the logvec dataset...')
logvec = [ linalg.logm(m).reshape((1,m.shape[0]*m.shape[1])) for m in covdata] #calculate the logm and vectorize
X = np.vstack(logvec) #create the array of vectorized covd data
np.save(outfile_logvec,X)
print('The vectorized covd array has shape '+str(X.shape))
outfile_clusterable_embedding = os.path.join(dirname, basename)+'.clusterable_embedding.npy'
if os.path.exists(outfile_clusterable_embedding):
print('...load the clusterable embedding...')
clusterable_embedding = np.load(outfile_clusterable_embedding,allow_pickle=True)
else:
print('...create the clusterable embedding...')
clusterable_embedding = umap.UMAP(min_dist=0.0,n_components=3,random_state=42).fit_transform(X) # this is used to identify clusters
np.save(outfile_clusterable_embedding,clusterable_embedding)
print('The embedding has shape '+str(clusterable_embedding.shape))
# ####################################################################################################
# # Free up spaces
# ###################################################################################################
# del G # G is not needed anymore
# del A # A is not needed anymore
# del morphology
# ####################################################################################################
# # Color graph nodes by community label
# ###################################################################################################
# print('Preparing to color the graph communities')
# print('...set up the empty graph...')
# g = nx.Graph()
# g.add_nodes_from(range(pos.shape[0])) # add all the nodes of the graph, but not all of them are in a covd cluster because of small communities
# print('...set up the empty dictionary...')
# dictionary = {}
# for node in range(pos.shape[0]):
# dictionary[int(node)] = -1 # set all node to -1
# print('...set up the full dictionary...')
# node_comm_tuples = [(int(node),i) for i, community in enumerate(bigcommunities) for node in community]
# dictionary.update(dict(node_comm_tuples))
# node_color = []
# for i in sorted (dictionary) : # determine the color based on the community
# node_color.append(dictionary[i])
# print('...draw the graph...')
# sns.set(style='white', rc={'figure.figsize':(50,50)})
# nx.draw_networkx_nodes(g, pos, alpha=0.5,node_color=node_color, node_size=1,cmap=plt.cm.Set1)
# print('...saving graph...')
# plt.axis('off')
# plt.savefig(os.path.join(dirname, basename)+'.community_graph.png') # save as png
# plt.close() | random_line_split |
|
pipeline.fromIlastik.py | #!/usr/bin/env python
####################################################################################################
# Load the necessary libraries
###################################################################################################
import networkx as nx
import numpy as np
from scipy import sparse, linalg
from sklearn.preprocessing import normalize
from graviti import * # the local module
import sys, getopt
import os
import copy
import seaborn as sns; sns.set()
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
####################################################################################################
# Define the input parameters
####################################################################################################
try:
opts, args = getopt.getopt(sys.argv[1:], "i:s:n:a:p:e:c:m:t:",
["input","seed=","nn=","area=","perimeter=",
"eccentricity=","circularity=","meanIntensity=","totalIntensity=","pos="])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
print(opts)
usecols = ()
for o, a in opts:
if o in ("-i", "--input"):
filename = a # morphology measurements file
elif o in ("-s", "--seed"):
rndsample = int(a) # Leiden seed
elif o in ("-n", "--nn"):
        nn = int(a) # number of nearest neighbours used to build the topology graph
elif o in ("-a", "--area"):
if a == '1': usecols = usecols+(3,)
elif o in ("-p", "--perimeter"):
if a == '1': usecols = usecols+(4,)
elif o in ("-e", "--eccentricity"):
if a == '1': usecols = usecols+(5,)
elif o in ("-c", "--circularity"):
if a == '1': usecols = usecols+(6,)
elif o in ("-m", "--meanIntensity"):
if a == '1': usecols = usecols+(7,)
elif o in ("-t", "--totalIntensity"):
if a == '1': usecols = usecols+(8,)
elif o in ("--pos"):
if a == '1':
position = True
else:
position = False
else:
assert False, "unhandled option"
####################################################################################################
# Define basic filenames
# !!! have the base name dependent on the parameters !!!
####################################################################################################
basename_graph = os.path.splitext(os.path.basename(filename))[0]
if os.path.splitext(os.path.basename(filename))[1] == '.gz':
basename = os.path.splitext(os.path.splitext(os.path.basename(filename))[0])[0]+'.s'+str(rndsample)
dirname = os.path.dirname(filename)
####################################################################################################
# Construct the UMAP graph
# and save the adjacency matrix
# and the degree and clustering coefficient vectors
###################################################################################################
print('Prepare the topological graph ...')
path = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.adj.npz'
if os.path.exists(path) and os.path.exists( os.path.join(dirname, basename_graph) + ".graph.pickle" ):
|
else:
    print('The graph does not exist yet and I am going to create one...')
pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2))
A = space2graph(pos,nn) # create the topology graph
sparse.save_npz(path, A)
G = nx.from_scipy_sparse_matrix(A, edge_attribute='weight')
d = getdegree(G)
cc = clusteringCoeff(A)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.degree.gz'
np.savetxt(outfile, d)
outfile = os.path.join(dirname, basename_graph)+'.nn'+str(nn)+'.cc.gz'
np.savetxt(outfile, cc)
nx.write_gpickle(G, os.path.join(dirname, basename_graph) + ".graph.pickle")
pos2norm = np.linalg.norm(pos,axis=1).reshape((pos.shape[0],1)) # the length of the position vector
print('Topological graph ready!')
print('...the graph has '+str(A.shape[0])+' nodes')
####################################################################################################
# Select the morphological features,
# and set the min number of nodes per subgraph
# Features list:
# fov_name x_centroid y_centroid area perimeter eccentricity circularity mean_intensity total_intensity
# !!!optimize the threshold!!!
###################################################################################################
print('Prepare the morphology array')
print('...the features tuple that we consider is: ',str(usecols))
morphology = np.loadtxt(filename, delimiter="\t", skiprows=True, usecols=usecols).reshape((A.shape[0],len(usecols)))
threshold = (morphology.shape[1]+4)*2 # set the min subgraph size based on the dim of the feature matrix
morphologies = morphology.shape[1] # number of morphological features
####################################################################################################
# Weight the graph taking into account topology and morphology
# the new weight is the ratio topological_similarity/(1+morpho_dissimilarity)
# !!!need to be optimized!!!
###################################################################################################
print('Rescale graph weights by local morphology')
morphology_normed = normalize(morphology, norm='l1', axis=0) # normalize features
GG = copy.deepcopy(G) # create a topology+morphology new graph
for ijw in G.edges(data='weight'): # loop over edges
feature = np.asarray([ abs(morphology_normed[ijw[0],f]-morphology_normed[ijw[1],f]) for f in range(morphologies) ])
GG[ijw[0]][ijw[1]]['weight'] = ijw[2]/(1.0+np.sum(feature))
####################################################################################################
# Community detection in the topology+morphology graph
# !!! find a way to avoid writing the edge list on disk !!!
###################################################################################################
print('Find the communities in GG')
from cdlib import algorithms
from cdlib import evaluation
from cdlib.utils import convert_graph_formats
import igraph
import leidenalg
from networkx.algorithms.community.quality import modularity
print('...generate connected components as subgraphs...')
graphs = list(nx.connected_component_subgraphs(GG)) # list the connected components
print('...convert networkx graph to igraph object...')
communities = []
for graph in graphs:
nx.write_weighted_edgelist(graph, basename+".edgelist.txt") # write the edge list on disk
g = igraph.Graph.Read_Ncol(basename+".edgelist.txt", names=True, weights="if_present", directed=False) # define the igraph obj
os.remove(basename+".edgelist.txt") # delete the edge list
part = leidenalg.find_partition(g,
leidenalg.ModularityVertexPartition,
initial_membership=None,
weights='weight',
seed=rndsample,
n_iterations=2) # find partitions
communities.extend([g.vs[x]['name'] for x in part]) # create a list of communities
bigcommunities = [g for g in communities if len(g) > threshold] # list of big enough communities
outfile = os.path.join(dirname, basename)+'.bigcommunities'
np.save(outfile, bigcommunities) # store the big communities
print('There are '+str(len(bigcommunities))+' big communities and '+str(len(communities))+' communities in total')
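# Possible refactoring for the TODO above (sketch only, not used by this script):
# the round trip through an edge-list file can be avoided by building the igraph
# object directly from the in-memory networkx subgraph. Assumes the integer node
# labels produced by nx.from_scipy_sparse_matrix.
def _nx_to_igraph(nx_graph):
    mapping = {n: k for k, n in enumerate(nx_graph.nodes())}        # igraph wants ids 0..n-1
    edges = [(mapping[u], mapping[v]) for u, v in nx_graph.edges()]
    ig = igraph.Graph(n=len(mapping), edges=edges)
    ig.vs['name'] = [str(n) for n in nx_graph.nodes()]
    ig.es['weight'] = [d['weight'] for _, _, d in nx_graph.edges(data=True)]
    return ig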
####################################################################################################
# Generate the covariance descriptors of the topology graph
# !!! insert a switch for the position !!!
###################################################################################################
print('Generate the covariance descriptor')
if position:
print('...the position information is included')
features = np.hstack((pos2norm,morphology)) # this is rotational invariant
else:
print('...the position information is not included')
features = morphology # this is without positions
outfile_covd = os.path.join(dirname, basename)+'.covd.npy' # name of the covd file
if os.path.exists(outfile_covd):
print('... loading the descriptors ...')
covdata = np.load(outfile_covd,allow_pickle=True) # load covd data
else:
print('... creating the descriptors ...')
covdata = community_covd(features,G,bigcommunities) # get list of cov matrices and a list of nodes per matrix
np.save(outfile_covd,covdata) # store covd data
print('There are '+str(len(covdata))+' covariance descriptors ')
####################################################################################################
# Cluster the covariance descriptors
###################################################################################################
print('Clustering the descriptors')
import umap
import hdbscan
import sklearn.cluster as cluster
from sklearn.cluster import OPTICS
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
print('...prepare the data...')
outfile_logvec = os.path.join(dirname, basename)+'.logvec.npy'
if os.path.exists(outfile_logvec):
print('...load the logvec dataset...')
X = np.load(outfile_logvec,allow_pickle=True)
else:
print('...create the logvec dataset...')
logvec = [ linalg.logm(m).reshape((1,m.shape[0]*m.shape[1])) for m in covdata] #calculate the logm and vectorize
X = np.vstack(logvec) #create the array of vectorized covd data
np.save(outfile_logvec,X)
print('The vectorized covd array has shape '+str(X.shape))
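# Why the matrix logarithm: covariance descriptors live on the manifold of symmetric
# positive-definite matrices, and mapping them through logm (the log-Euclidean trick)
# lets flat Euclidean tools such as UMAP and vector clustering operate on them.
# Tiny self-check on an arbitrary SPD matrix (illustrative only):
_spd = np.array([[2.0, 0.3], [0.3, 1.0]])
assert np.allclose(linalg.expm(linalg.logm(_spd)), _spd)  # expm undoes logm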
outfile_clusterable_embedding = os.path.join(dirname, basename)+'.clusterable_embedding.npy'
if os.path.exists(outfile_clusterable_embedding):
print('...load the clusterable embedding...')
clusterable_embedding = np.load(outfile_clusterable_embedding,allow_pickle=True)
else:
print('...create the clusterable embedding...')
clusterable_embedding = umap.UMAP(min_dist=0.0,n_components=3,random_state=42).fit_transform(X) # this is used to identify clusters
np.save(outfile_clusterable_embedding,clusterable_embedding)
print('The embedding has shape '+str(clusterable_embedding.shape))
# ####################################################################################################
# # Free up spaces
# ###################################################################################################
# del G # G is not needed anymore
# del A # A is not needed anymore
# del morphology
# ####################################################################################################
# # Color graph nodes by community label
# ###################################################################################################
# print('Preparing to color the graph communities')
# print('...set up the empty graph...')
# g = nx.Graph()
# g.add_nodes_from(range(pos.shape[0])) # add all the nodes of the graph, but not all of them are in a covd cluster because of small communities
# print('...set up the empty dictionary...')
# dictionary = {}
# for node in range(pos.shape[0]):
# dictionary[int(node)] = -1 # set all node to -1
# print('...set up the full dictionary...')
# node_comm_tuples = [(int(node),i) for i, community in enumerate(bigcommunities) for node in community]
# dictionary.update(dict(node_comm_tuples))
# node_color = []
# for i in sorted (dictionary) : # determine the color based on the community
# node_color.append(dictionary[i])
# print('...draw the graph...')
# sns.set(style='white', rc={'figure.figsize':(50,50)})
# nx.draw_networkx_nodes(g, pos, alpha=0.5,node_color=node_color, node_size=1,cmap=plt.cm.Set1)
# print('...saving graph...')
# plt.axis('off')
# plt.savefig(os.path.join(dirname, basename)+'.community_graph.png') # save as png
# plt.close()
| print('The graph exists already and I am now loading it...')
A = sparse.load_npz(path)
    pos = np.loadtxt(filename, delimiter="\t",skiprows=True,usecols=(1,2)) # choose x and y and skip the header
G = nx.read_gpickle(os.path.join(dirname, basename_graph) + ".graph.pickle")
d = getdegree(G)
cc = clusteringCoeff(A) | conditional_block |
save_hist.py | #!/usr/bin/env python
###########################################################################
# Replacement for save_data.py takes a collection of hdf5 files, and #
# builds desired histograms for rapid plotting #
###########################################################################
import numpy as np
import healpy as hp
import argparse, tables
from icecube import astro
import dataFunctions as df
from showerllh.analysis.skypos import getDecRA
from showerllh.analysis.llhtools import inPoly, getEbins
from showerllh.analysis.zfix import zfix
def hdf5extractor(config, file):
##=======================================================================
## Starting parameters
rDict = {'proton':'p','helium':'h','oxygen':'o','iron':'f'}
t1 = astro.Time()
print 'Building arrays from %s...' % file
t = tables.openFile(file)
q = {}
# Get reconstructed compositions from list of children in file
children = []
for node in t.walk_nodes('/'):
try: children += [node.name]
except tables.NoSuchNodeError:
continue
children = list(set(children))
compList = [n.split('_')[-1] for n in children if 'ShowerLLH_' in n]
# Get ShowerLLH cuts and info
rrc = t.root.ShowerLLH_proton.col('exists').astype('bool')
for value in ['zenith', 'azimuth']:
q[value] = t.root.ShowerLLH_proton.col(value)
for comp in compList:
r = rDict[comp]
for value in ['x','y','energy']:
q[r+'ML_'+value] = t.getNode('/ShowerLLH_'+comp).col(value)
q[r+'LLH'] = t.getNode('/ShowerLLHParams_'+comp).col('maxLLH')
# Timing
mjd_day = t.root.I3EventHeader.col('time_start_mjd_day')
mjd_sec = t.root.I3EventHeader.col('time_start_mjd_sec')
mjd_ns = t.root.I3EventHeader.col('time_start_mjd_ns')
q['mjd'] = np.zeros(len(mjd_day), dtype=np.float64)
for i in range(len(mjd_day)):
day = int(mjd_day[i])
sec = int(mjd_sec[i])
ns = int(mjd_ns[i])
t1.SetTime(day, sec, ns)
q['mjd'][i] = t1.GetMJD()
# Event ID
run = t.root.I3EventHeader.col('Run')
event = t.root.I3EventHeader.col('Event')
subevent = t.root.I3EventHeader.col('SubEvent')
eventIDs = []
for i in range(len(run)):
eventIDs += ['%s_%s_%s' % (run[i], event[i], subevent[i])]
q['eventIDs'] = np.asarray(eventIDs)
# Condition and prescale passed (filter[condition, prescale])
# For notes on weights see bottom of file
filtermask = df.filter_mask(config)
filternames = df.filter_names(config)
f = {}
for fname in filternames:
f[fname] = t.getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
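    # Weighting in brief (illustrative numbers; see the notes at the bottom of this
    # file): rows are the filters scaled by their nominal prescale weights
    # (1 for STA8, 1.5 for STA3ii, 3 for STA3), columns are events, zeros are
    # replaced by a large sentinel, and the final weight is the column minimum.
    _demo = np.array([[1.0, 0.0, 0.0],    # STA8   flag * weight
                      [1.5, 1.5, 0.0],    # STA3ii flag * weight
                      [3.0, 3.0, 3.0]])   # STA3   flag * weight
    _demo[_demo == 0] = 100.
    # np.amin(_demo, axis=0) -> array([1. , 1.5, 3. ])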
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
def | (config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d['azimuth'][c0]
dec = d['dec'][c0]
ra = d['ra'][c0]
x = hp.ang2pix(nside, dec, ra)
# Energy cut
ecut = (r >= 6.2)
p = {'weights':w}
q = {}
q['energy_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
p['weights'] = w**2
q['energy_err_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_err_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_err_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
# Energy cut versions
q['llhcut_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=w[ecut])[0]
q['llhcut_err_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=(w[ecut])**2)[0]
q['energy'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
q['energy_err'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist_err'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh_err'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
# Energy cut versions
q['llhcut'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
q['llhcut_err'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
np.save(outfile, q)
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Converts hdf5 file to npy dict')
p.add_argument('-c', '--config', dest='config',
help='Detector configuration [IT73 --> IT81-IV]')
p.add_argument('-f', '--files', dest='files', nargs='*',
help='Input files')
p.add_argument('-o', '--outfiles', dest='outfiles', nargs='*',
help='Output files')
p.add_argument('--sky', dest='sky',
default=False, action='store_true',
help='Write the sky histograms')
args = p.parse_args()
for infile, outfile in zip(args.files, args.outfiles):
if args.sky:
skyWriter(args.config, infile, outfile)
else:
histWriter(args.config, infile, outfile)
###############################################################################
## Notes on weights
"""
- Events that pass STA8 condition have a prescale and weight of 1.
- Events that pass STA3ii condition have a 1/2 chance to pass the STA3ii
prescale. Those that fail have a 1/3 chance to pass the STA3 prescale. So, the
total chance of an event passing is 1/2+(1/3*1/2) = 2/3. Weight = 1.5
- Events that pass STA3 condition but not STA3ii condition have a prescale and
weight of 3
"""
| skyWriter | identifier_name |
save_hist.py | #!/usr/bin/env python
###########################################################################
# Replacement for save_data.py takes a collection of hdf5 files, and #
# builds desired histograms for rapid plotting #
###########################################################################
import numpy as np
import healpy as hp
import argparse, tables
from icecube import astro
import dataFunctions as df
from showerllh.analysis.skypos import getDecRA
from showerllh.analysis.llhtools import inPoly, getEbins
from showerllh.analysis.zfix import zfix
def hdf5extractor(config, file):
##=======================================================================
## Starting parameters
rDict = {'proton':'p','helium':'h','oxygen':'o','iron':'f'}
t1 = astro.Time()
print 'Building arrays from %s...' % file
t = tables.openFile(file)
q = {}
# Get reconstructed compositions from list of children in file
children = []
for node in t.walk_nodes('/'):
try: children += [node.name]
except tables.NoSuchNodeError:
continue
children = list(set(children))
compList = [n.split('_')[-1] for n in children if 'ShowerLLH_' in n]
# Get ShowerLLH cuts and info
rrc = t.root.ShowerLLH_proton.col('exists').astype('bool')
for value in ['zenith', 'azimuth']:
q[value] = t.root.ShowerLLH_proton.col(value)
for comp in compList:
r = rDict[comp]
for value in ['x','y','energy']:
q[r+'ML_'+value] = t.getNode('/ShowerLLH_'+comp).col(value)
q[r+'LLH'] = t.getNode('/ShowerLLHParams_'+comp).col('maxLLH')
# Timing
mjd_day = t.root.I3EventHeader.col('time_start_mjd_day')
mjd_sec = t.root.I3EventHeader.col('time_start_mjd_sec')
mjd_ns = t.root.I3EventHeader.col('time_start_mjd_ns')
q['mjd'] = np.zeros(len(mjd_day), dtype=np.float64)
for i in range(len(mjd_day)):
day = int(mjd_day[i])
sec = int(mjd_sec[i])
ns = int(mjd_ns[i])
t1.SetTime(day, sec, ns)
q['mjd'][i] = t1.GetMJD()
# Event ID
run = t.root.I3EventHeader.col('Run')
event = t.root.I3EventHeader.col('Event')
subevent = t.root.I3EventHeader.col('SubEvent')
eventIDs = []
for i in range(len(run)):
eventIDs += ['%s_%s_%s' % (run[i], event[i], subevent[i])]
q['eventIDs'] = np.asarray(eventIDs)
# Condition and prescale passed (filter[condition, prescale])
# For notes on weights see bottom of file
filtermask = df.filter_mask(config)
filternames = df.filter_names(config)
f = {}
for fname in filternames:
f[fname] = t.getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
def skyWriter(config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d['azimuth'][c0]
dec = d['dec'][c0]
ra = d['ra'][c0]
x = hp.ang2pix(nside, dec, ra)
# Energy cut
ecut = (r >= 6.2)
p = {'weights':w}
q = {}
q['energy_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
p['weights'] = w**2
q['energy_err_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_err_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_err_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
# Energy cut versions
q['llhcut_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins), | q['llhcut_err_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=(w[ecut])**2)[0]
q['energy'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
q['energy_err'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist_err'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh_err'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
# Energy cut versions
q['llhcut'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
q['llhcut_err'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
np.save(outfile, q)
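    # Quick illustrative reduction (not required by the pipeline): any of the 2-D
    # histograms above collapses back to a per-pixel HEALPix map by summing over
    # the non-spatial axis; such a map can later be viewed with hp.mollview.
    _counts_map = q['energy_w'].sum(axis=1)   # weighted event counts per sky pixel
    assert _counts_map.shape == (npix,)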
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Converts hdf5 file to npy dict')
p.add_argument('-c', '--config', dest='config',
help='Detector configuration [IT73 --> IT81-IV]')
p.add_argument('-f', '--files', dest='files', nargs='*',
help='Input files')
p.add_argument('-o', '--outfiles', dest='outfiles', nargs='*',
help='Output files')
p.add_argument('--sky', dest='sky',
default=False, action='store_true',
help='Write the sky histograms')
args = p.parse_args()
for infile, outfile in zip(args.files, args.outfiles):
if args.sky:
skyWriter(args.config, infile, outfile)
else:
histWriter(args.config, infile, outfile)
###############################################################################
## Notes on weights
"""
- Events that pass STA8 condition have a prescale and weight of 1.
- Events that pass STA3ii condition have a 1/2 chance to pass the STA3ii
prescale. Those that fail have a 1/3 chance to pass the STA3 prescale. So, the
total chance of an event passing is 1/2+(1/3*1/2) = 2/3. Weight = 1.5
- Events that pass STA3 condition but not STA3ii condition have a prescale and
weight of 3
""" | weights=w[ecut])[0] | random_line_split |
save_hist.py | #!/usr/bin/env python
###########################################################################
# Replacement for save_data.py takes a collection of hdf5 files, and #
# builds desired histograms for rapid plotting #
###########################################################################
import numpy as np
import healpy as hp
import argparse, tables
from icecube import astro
import dataFunctions as df
from showerllh.analysis.skypos import getDecRA
from showerllh.analysis.llhtools import inPoly, getEbins
from showerllh.analysis.zfix import zfix
def hdf5extractor(config, file):
##=======================================================================
## Starting parameters
rDict = {'proton':'p','helium':'h','oxygen':'o','iron':'f'}
t1 = astro.Time()
print 'Building arrays from %s...' % file
t = tables.openFile(file)
q = {}
# Get reconstructed compositions from list of children in file
children = []
for node in t.walk_nodes('/'):
try: children += [node.name]
except tables.NoSuchNodeError:
continue
children = list(set(children))
compList = [n.split('_')[-1] for n in children if 'ShowerLLH_' in n]
# Get ShowerLLH cuts and info
rrc = t.root.ShowerLLH_proton.col('exists').astype('bool')
for value in ['zenith', 'azimuth']:
q[value] = t.root.ShowerLLH_proton.col(value)
for comp in compList:
r = rDict[comp]
for value in ['x','y','energy']:
q[r+'ML_'+value] = t.getNode('/ShowerLLH_'+comp).col(value)
q[r+'LLH'] = t.getNode('/ShowerLLHParams_'+comp).col('maxLLH')
# Timing
mjd_day = t.root.I3EventHeader.col('time_start_mjd_day')
mjd_sec = t.root.I3EventHeader.col('time_start_mjd_sec')
mjd_ns = t.root.I3EventHeader.col('time_start_mjd_ns')
q['mjd'] = np.zeros(len(mjd_day), dtype=np.float64)
for i in range(len(mjd_day)):
day = int(mjd_day[i])
sec = int(mjd_sec[i])
ns = int(mjd_ns[i])
t1.SetTime(day, sec, ns)
q['mjd'][i] = t1.GetMJD()
# Event ID
run = t.root.I3EventHeader.col('Run')
event = t.root.I3EventHeader.col('Event')
subevent = t.root.I3EventHeader.col('SubEvent')
eventIDs = []
for i in range(len(run)):
eventIDs += ['%s_%s_%s' % (run[i], event[i], subevent[i])]
q['eventIDs'] = np.asarray(eventIDs)
# Condition and prescale passed (filter[condition, prescale])
# For notes on weights see bottom of file
filtermask = df.filter_mask(config)
filternames = df.filter_names(config)
f = {}
for fname in filternames:
f[fname] = t.getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
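    # Key anatomy (illustrative, using a made-up key of the same form): a name like
    # 'energy_w_z_p_0-12_0-60' selects composition 'p', declination band '0-12' and
    # right-ascension band '0-60', while 'w' switches on the event weights and 'z'
    # applies the zenith correction (x - fit) before histogramming.
    _demo_key = 'energy_w_z_p_0-12_0-60'
    _e, _dec, _ra = _demo_key.split('_')[-3:]   # -> ('p', '0-12', '0-60')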
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
def skyWriter(config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d['azimuth'][c0]
dec = d['dec'][c0]
ra = d['ra'][c0]
x = hp.ang2pix(nside, dec, ra)
# Energy cut
ecut = (r >= 6.2)
p = {'weights':w}
q = {}
q['energy_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
p['weights'] = w**2
q['energy_err_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_err_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_err_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
# Energy cut versions
q['llhcut_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=w[ecut])[0]
q['llhcut_err_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=(w[ecut])**2)[0]
q['energy'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
q['energy_err'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist_err'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh_err'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
# Energy cut versions
q['llhcut'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
q['llhcut_err'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
np.save(outfile, q)
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Converts hdf5 file to npy dict')
p.add_argument('-c', '--config', dest='config',
help='Detector configuration [IT73 --> IT81-IV]')
p.add_argument('-f', '--files', dest='files', nargs='*',
help='Input files')
p.add_argument('-o', '--outfiles', dest='outfiles', nargs='*',
help='Output files')
p.add_argument('--sky', dest='sky',
default=False, action='store_true',
help='Write the sky histograms')
args = p.parse_args()
for infile, outfile in zip(args.files, args.outfiles):
|
###############################################################################
## Notes on weights
"""
- Events that pass STA8 condition have a prescale and weight of 1.
- Events that pass STA3ii condition have a 1/2 chance to pass the STA3ii
prescale. Those that fail have a 1/3 chance to pass the STA3 prescale. So, the
total chance of an event passing is 1/2+(1/3*1/2) = 2/3. Weight = 1.5
- Events that pass STA3 condition but not STA3ii condition have a prescale and
weight of 3
"""
| if args.sky:
skyWriter(args.config, infile, outfile)
else:
histWriter(args.config, infile, outfile) | conditional_block |
save_hist.py | #!/usr/bin/env python
###########################################################################
# Replacement for save_data.py takes a collection of hdf5 files, and #
# builds desired histograms for rapid plotting #
###########################################################################
import numpy as np
import healpy as hp
import argparse, tables
from icecube import astro
import dataFunctions as df
from showerllh.analysis.skypos import getDecRA
from showerllh.analysis.llhtools import inPoly, getEbins
from showerllh.analysis.zfix import zfix
def hdf5extractor(config, file):
##=======================================================================
## Starting parameters
|
def histWriter(config, file, outfile):
# Bin values
eList = ['p','h','o','f']
decbins = ['0-12','12-24','24-40']
rabins = ['0-60','60-120','120-180','180-240','240-300','300-360']
# Build general list of key names to write
keyList = []
keyList += ['energy','energy_w','energy_z','energy_w_z']
keyList += ['zenith','zenith_w','core','core_w']
keyList += ['%s_err' % k for k in keyList]
# Split by composition
keyList = ['%s_%s' % (k, e) for k in keyList for e in eList]
# Split spatially into 3 dec bins (~12 degrees each) and 6 ra bins
keyList = ['%s_%s_%s' % (k, dec, ra) for k in keyList \
for dec in decbins for ra in rabins]
# Extract information from hdf5 file
q = hdf5extractor(config, file)
c0 = q['cuts']['llh']
r = np.log10(q['ML_energy'])[c0]
cosz = np.cos(q['zenith'])[c0]
dist = np.sqrt(q['ML_x']**2 + q['ML_y']**2)[c0]
fit = zfix(q['zenith'], bintype='logdist')[c0]
w = q['weights'][c0]
# Make cuts
degree = np.pi / 180.
for e in eList:
q[e] = (q['llh_comp'] == e)[c0]
for dec in decbins:
decmin = (180 - float(dec.split('-')[1])) * degree
decmax = (180 - float(dec.split('-')[0])) * degree
q[dec] = ((q['dec'] >= decmin) * (q['dec'] < decmax))[c0]
for ra in rabins:
ramin = float(ra.split('-')[0]) * degree
ramax = float(ra.split('-')[1]) * degree
q[ra] = ((q['ra'] >= ramin) * (q['ra'] < ramax))[c0]
# Method of intelligently producing histograms based on key names
def smartHist(key, x, bins):
tempx = x
wts = None
params = key.split('_')
e, dec, ra = params[-3:]
c1 = q[e] * q[dec] * q[ra]
if 'z' in params:
tempx = x - fit
if 'w' in params:
wts = w[c1]
if 'err' in params:
wts = (w[c1])**2
h0 = np.histogram(tempx[c1], bins=bins, weights=wts)[0]
return h0
# Energy distribution
h = {}
print 'Calculating energy distributions...'
bins = getEbins(reco=True)
energyKeys = [k for k in keyList if 'energy' in k]
for key in energyKeys:
h[key] = smartHist(key, r, bins)
# Zenith distribution
print 'Calculating zenith distributions...'
bins = np.linspace(0.8, 1, 81)
zenithKeys = [k for k in keyList if 'zenith' in k]
for key in zenithKeys:
h[key] = smartHist(key, cosz, bins)
# Core distribution
print 'Calculating core position distributions...'
bins = np.linspace(0, 700, 141)
coreKeys = [k for k in keyList if 'core' in k]
for key in coreKeys:
h[key] = smartHist(key, dist, bins)
print 'Saving...'
np.save(outfile, h)
def skyWriter(config, file, outfile):
nside = 64
npix = hp.nside2npix(nside)
# Binning for various parameters
sbins = np.arange(npix+1, dtype=int)
ebins = np.arange(5, 9.501, 0.05)
dbins = np.linspace(0, 700, 141)
lbins = np.linspace(-20, 20, 151)
# Get desired information from hdf5 file
d = hdf5extractor(config, file)
c0 = d['cuts']['llh']
r = np.log10(d['ML_energy'])[c0]
fit = zfix(d['zenith'], bintype='logdist')[c0]
w = d['weights'][c0]
xy = np.sqrt(d['ML_x']**2 + d['ML_y']**2)[c0]
dllh = (d['fLLH'] - d['pLLH'])[c0]
# Bin in sky
#zen = np.pi - d['zenith'][c0]
#azi = d['azimuth'][c0]
dec = d['dec'][c0]
ra = d['ra'][c0]
x = hp.ang2pix(nside, dec, ra)
# Energy cut
ecut = (r >= 6.2)
p = {'weights':w}
q = {}
q['energy_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
p['weights'] = w**2
q['energy_err_w'] = np.histogram2d(x, r-fit, bins=(sbins,ebins), **p)[0]
q['dist_err_w'] = np.histogram2d(x, xy, bins=(sbins,dbins), **p)[0]
q['llh_err_w'] = np.histogram2d(x, dllh, bins=(sbins,lbins), **p)[0]
# Energy cut versions
q['llhcut_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=w[ecut])[0]
q['llhcut_err_w'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins),
weights=(w[ecut])**2)[0]
q['energy'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
q['energy_err'] = np.histogram2d(x, r-fit, bins=(sbins,ebins))[0]
q['dist_err'] = np.histogram2d(x, xy, bins=(sbins,dbins))[0]
q['llh_err'] = np.histogram2d(x, dllh, bins=(sbins,lbins))[0]
# Energy cut versions
q['llhcut'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
q['llhcut_err'] = np.histogram2d(x[ecut], dllh[ecut], bins=(sbins,lbins))[0]
np.save(outfile, q)
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Converts hdf5 file to npy dict')
p.add_argument('-c', '--config', dest='config',
help='Detector configuration [IT73 --> IT81-IV]')
p.add_argument('-f', '--files', dest='files', nargs='*',
help='Input files')
p.add_argument('-o', '--outfiles', dest='outfiles', nargs='*',
help='Output files')
p.add_argument('--sky', dest='sky',
default=False, action='store_true',
help='Write the sky histograms')
args = p.parse_args()
for infile, outfile in zip(args.files, args.outfiles):
if args.sky:
skyWriter(args.config, infile, outfile)
else:
histWriter(args.config, infile, outfile)
###############################################################################
## Notes on weights
"""
- Events that pass STA8 condition have a prescale and weight of 1.
- Events that pass STA3ii condition have a 1/2 chance to pass the STA3ii
prescale. Those that fail have a 1/3 chance to pass the STA3 prescale. So, the
total chance of an event passing is 1/2+(1/3*1/2) = 2/3. Weight = 1.5
- Events that pass STA3 condition but not STA3ii condition have a prescale and
weight of 3
"""
| rDict = {'proton':'p','helium':'h','oxygen':'o','iron':'f'}
t1 = astro.Time()
print 'Building arrays from %s...' % file
t = tables.openFile(file)
q = {}
# Get reconstructed compositions from list of children in file
children = []
for node in t.walk_nodes('/'):
try: children += [node.name]
except tables.NoSuchNodeError:
continue
children = list(set(children))
compList = [n.split('_')[-1] for n in children if 'ShowerLLH_' in n]
# Get ShowerLLH cuts and info
rrc = t.root.ShowerLLH_proton.col('exists').astype('bool')
for value in ['zenith', 'azimuth']:
q[value] = t.root.ShowerLLH_proton.col(value)
for comp in compList:
r = rDict[comp]
for value in ['x','y','energy']:
q[r+'ML_'+value] = t.getNode('/ShowerLLH_'+comp).col(value)
q[r+'LLH'] = t.getNode('/ShowerLLHParams_'+comp).col('maxLLH')
# Timing
mjd_day = t.root.I3EventHeader.col('time_start_mjd_day')
mjd_sec = t.root.I3EventHeader.col('time_start_mjd_sec')
mjd_ns = t.root.I3EventHeader.col('time_start_mjd_ns')
q['mjd'] = np.zeros(len(mjd_day), dtype=np.float64)
for i in range(len(mjd_day)):
day = int(mjd_day[i])
sec = int(mjd_sec[i])
ns = int(mjd_ns[i])
t1.SetTime(day, sec, ns)
q['mjd'][i] = t1.GetMJD()
# Event ID
run = t.root.I3EventHeader.col('Run')
event = t.root.I3EventHeader.col('Event')
subevent = t.root.I3EventHeader.col('SubEvent')
eventIDs = []
for i in range(len(run)):
eventIDs += ['%s_%s_%s' % (run[i], event[i], subevent[i])]
q['eventIDs'] = np.asarray(eventIDs)
# Condition and prescale passed (filter[condition, prescale])
# For notes on weights see bottom of file
filtermask = df.filter_mask(config)
filternames = df.filter_names(config)
f = {}
for fname in filternames:
f[fname] = t.getNode('/'+filtermask).col(fname)
f[fname] = f[fname][:,0].astype(float)
filterArray = np.array([f[fname] * df.it_weights(fname)
for fname in f.keys()])
filterArray[filterArray == 0] = 100.
q['weights'] = np.amin(filterArray, axis=0)
# Other reconstruction info
q['NStations'] = t.root.NStations.col('value')
t.close()
# Laputop values
#for key in ['x','y','zenith','azimuth','s125','e_proton','e_iron','beta']:
# arrays += ['lap_'+key]
# Get Laputop info
#for value in ['x', 'y', 'zenith', 'azimuth']:
# q['lap_'+value] = t.root.Laputop.col(value)
#for value in ['s125', 'e_proton', 'e_iron', 'beta']:
# q['lap_'+value] = t.root.LaputopParams.col(value)
# Get most likely composition
rList = [rDict[comp] for comp in compList]
full_llhs = np.array([q[r+'LLH'] for r in rList])
max_llh = np.amax(full_llhs, axis=0)
q['llh_comp'] = np.array(['' for i in range(len(q['pLLH']))])
for r in rList:
q['llh_comp'][q[r+'LLH'] == max_llh] = r
for key in ['x', 'y', 'energy']:
q['ML_'+key] = np.array([q[r+'ML_'+key][i]
for i, r in enumerate(q['llh_comp'])])
# Check for multiple most-likely compositions (mark as bad)
badVals = np.sum(full_llhs == max_llh, axis=0)
badVals = (badVals-1).astype('bool')
q['llh_comp'][badVals] = ''
for key in ['x','y','energy']:
q['ML_'+key][badVals] = np.nan
# Calculate sky positions
q['dec'], q['ra'] = getDecRA(q, verbose=False)
# Containment cut
it_geo = df.it_geo(config)
q['cuts'] = {}
q['cuts']['llh'] = inPoly(q['ML_x'], q['ML_y'], 0, config=it_geo)
return q | identifier_body |
repocachemanager.go | package cache
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/fluxcd/flux/pkg/image"
"github.com/fluxcd/flux/pkg/registry"
)
type imageToUpdate struct {
ref image.Ref
previousDigest string
previousRefresh time.Duration
}
// repoCacheManager handles cache operations for a container image repository
type repoCacheManager struct {
now time.Time
repoID image.Name
client registry.Client
clientTimeout time.Duration
burst int
trace bool
logger log.Logger
cacheClient Client
sync.Mutex
}
func newRepoCacheManager(now time.Time,
repoID image.Name, clientFactory registry.ClientFactory, creds registry.Credentials, repoClientTimeout time.Duration,
burst int, trace bool, logger log.Logger, cacheClient Client) (*repoCacheManager, error) {
client, err := clientFactory.ClientFor(repoID.CanonicalName(), creds)
if err != nil {
return nil, err
}
manager := &repoCacheManager{
now: now,
repoID: repoID,
client: client,
clientTimeout: repoClientTimeout,
burst: burst,
trace: trace,
logger: logger,
cacheClient: cacheClient,
}
return manager, nil
}
// fetchRepository fetches the repository from the cache
func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {
var result ImageRepository
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, _, err := c.cacheClient.GetKey(repoKey)
if err != nil {
return ImageRepository{}, err
}
if err = json.Unmarshal(bytes, &result); err != nil {
return ImageRepository{}, err
}
return result, nil
}
// getTags gets the tags from the repository
func (c *repoCacheManager) | (ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
}
} else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
}
if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
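// Illustrative sketch (not part of the original file): one way a caller could
// combine the cache lookup with a registry refresh. The method name
// refreshImages is hypothetical; fetchImages and updateImages are the real
// methods defined in this file.
func (c *repoCacheManager) refreshImages(ctx context.Context, tags []string) (map[string]image.Info, error) {
    res, err := c.fetchImages(tags)
    if err != nil {
        return nil, err
    }
    updated, _, _ := c.updateImages(ctx, res.imagesToUpdate)
    for tag, info := range updated {
        res.imagesFound[tag] = info // freshly fetched manifests replace stale entries
    }
    return res.imagesFound, nil
}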
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// copy the loop variable to avoid a data race when it is captured by the goroutine below
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", entry.ExcludedReason, "ref", imageID)
refresh = excludedRefresh
reason = "image is excluded"
case update.previousDigest == "":
entry.Info.LastFetched = c.now
refresh = update.previousRefresh
reason = "no prior cache entry for image"
case entry.Info.Digest == update.previousDigest:
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh * 2)
reason = "image digest is same"
default: // i.e., not excluded, but the digests differ -> the tag was moved
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh / 2)
reason = "image digest is different"
}
if c.trace {
c.logger.Log("trace", "caching manifest", "ref", imageID, "last_fetched", c.now.Format(time.RFC3339), "refresh", refresh.String(), "reason", reason)
}
key := NewManifestKey(imageID.CanonicalRef())
// Write back to memcached
val, err := json.Marshal(entry)
if err != nil {
return registry.ImageEntry{}, err
}
err = c.cacheClient.SetKey(key, c.now.Add(refresh), val)
if err != nil {
return registry.ImageEntry{}, err
}
return entry, nil
}
func (r *repoCacheManager) clientTimeoutError() error {
return fmt.Errorf("client timeout (%s) exceeded", r.clientTimeout)
}
| getTags | identifier_name |
repocachemanager.go | package cache
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/fluxcd/flux/pkg/image"
"github.com/fluxcd/flux/pkg/registry"
)
type imageToUpdate struct {
ref image.Ref
previousDigest string
previousRefresh time.Duration
}
// repoCacheManager handles cache operations for a container image repository
type repoCacheManager struct {
now time.Time
repoID image.Name
client registry.Client
clientTimeout time.Duration
burst int
trace bool
logger log.Logger
cacheClient Client
sync.Mutex
}
func newRepoCacheManager(now time.Time,
repoID image.Name, clientFactory registry.ClientFactory, creds registry.Credentials, repoClientTimeout time.Duration,
burst int, trace bool, logger log.Logger, cacheClient Client) (*repoCacheManager, error) {
client, err := clientFactory.ClientFor(repoID.CanonicalName(), creds)
if err != nil {
return nil, err
}
manager := &repoCacheManager{
now: now,
repoID: repoID,
client: client,
clientTimeout: repoClientTimeout,
burst: burst,
trace: trace,
logger: logger,
cacheClient: cacheClient,
}
return manager, nil
}
// fetchRepository fetches the repository from the cache
func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {
var result ImageRepository
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, _, err := c.cacheClient.GetKey(repoKey)
if err != nil {
return ImageRepository{}, err
}
if err = json.Unmarshal(bytes, &result); err != nil {
return ImageRepository{}, err
}
return result, nil
}
// getTags gets the tags from the repository
func (c *repoCacheManager) getTags(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
} | if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// copy the loop variable to avoid a data race when it is captured by the goroutine below
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
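// Illustrative sketch (not part of the original file): the bounded-concurrency
// pattern used in updateImages, shown in isolation. A buffered channel acts as
// a semaphore so at most `limit` goroutines run at once and a WaitGroup waits
// for them to finish; the helper name forEachBounded is hypothetical.
func forEachBounded(ctx context.Context, limit int, jobs []string, work func(string)) {
    sem := make(chan struct{}, limit)
    var wg sync.WaitGroup
    for _, job := range jobs {
        job := job // copy the loop variable before capturing it
        select {
        case <-ctx.Done():
            wg.Wait()
            return
        case sem <- struct{}{}: // acquire a slot
        }
        wg.Add(1)
        go func() {
            defer func() { wg.Done(); <-sem }() // release the slot
            work(job)
        }()
    }
    wg.Wait()
}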
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", entry.ExcludedReason, "ref", imageID)
refresh = excludedRefresh
reason = "image is excluded"
case update.previousDigest == "":
entry.Info.LastFetched = c.now
refresh = update.previousRefresh
reason = "no prior cache entry for image"
case entry.Info.Digest == update.previousDigest:
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh * 2)
reason = "image digest is same"
default: // i.e., not excluded, but the digests differ -> the tag was moved
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh / 2)
reason = "image digest is different"
}
if c.trace {
c.logger.Log("trace", "caching manifest", "ref", imageID, "last_fetched", c.now.Format(time.RFC3339), "refresh", refresh.String(), "reason", reason)
}
key := NewManifestKey(imageID.CanonicalRef())
// Write back to memcached
val, err := json.Marshal(entry)
if err != nil {
return registry.ImageEntry{}, err
}
err = c.cacheClient.SetKey(key, c.now.Add(refresh), val)
if err != nil {
return registry.ImageEntry{}, err
}
return entry, nil
}
func (r *repoCacheManager) clientTimeoutError() error {
return fmt.Errorf("client timeout (%s) exceeded", r.clientTimeout)
} | } else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
} | random_line_split |
repocachemanager.go | package cache
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/fluxcd/flux/pkg/image"
"github.com/fluxcd/flux/pkg/registry"
)
type imageToUpdate struct {
ref image.Ref
previousDigest string
previousRefresh time.Duration
}
// repoCacheManager handles cache operations for a container image repository
type repoCacheManager struct {
now time.Time
repoID image.Name
client registry.Client
clientTimeout time.Duration
burst int
trace bool
logger log.Logger
cacheClient Client
sync.Mutex
}
func newRepoCacheManager(now time.Time,
repoID image.Name, clientFactory registry.ClientFactory, creds registry.Credentials, repoClientTimeout time.Duration,
burst int, trace bool, logger log.Logger, cacheClient Client) (*repoCacheManager, error) {
client, err := clientFactory.ClientFor(repoID.CanonicalName(), creds)
if err != nil {
return nil, err
}
manager := &repoCacheManager{
now: now,
repoID: repoID,
client: client,
clientTimeout: repoClientTimeout,
burst: burst,
trace: trace,
logger: logger,
cacheClient: cacheClient,
}
return manager, nil
}
// fetchRepository fetches the repository from the cache
func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {
var result ImageRepository
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, _, err := c.cacheClient.GetKey(repoKey)
if err != nil {
return ImageRepository{}, err
}
if err = json.Unmarshal(bytes, &result); err != nil {
return ImageRepository{}, err
}
return result, nil
}
// getTags gets the tags from the repository
func (c *repoCacheManager) getTags(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
}
} else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
}
if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// copy the loop variable to avoid a data race when it is captured by the goroutine below
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", entry.ExcludedReason, "ref", imageID)
refresh = excludedRefresh
reason = "image is excluded"
case update.previousDigest == "":
entry.Info.LastFetched = c.now
refresh = update.previousRefresh
reason = "no prior cache entry for image"
case entry.Info.Digest == update.previousDigest:
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh * 2)
reason = "image digest is same"
default: // i.e., not excluded, but the digests differ -> the tag was moved
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh / 2)
reason = "image digest is different"
}
if c.trace {
c.logger.Log("trace", "caching manifest", "ref", imageID, "last_fetched", c.now.Format(time.RFC3339), "refresh", refresh.String(), "reason", reason)
}
key := NewManifestKey(imageID.CanonicalRef())
// Write back to memcached
val, err := json.Marshal(entry)
if err != nil {
return registry.ImageEntry{}, err
}
err = c.cacheClient.SetKey(key, c.now.Add(refresh), val)
if err != nil {
return registry.ImageEntry{}, err
}
return entry, nil
}
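// Illustrative sketch (not part of the original file): the adaptive refresh
// schedule applied above, in isolation. The interval doubles while the digest
// stays the same and halves when the tag moves, clamped to a fixed range; the
// helper name and the explicit bounds are assumptions standing in for
// clipRefresh, minRefresh and maxRefresh.
func nextRefresh(prev time.Duration, digestUnchanged bool, lo, hi time.Duration) time.Duration {
    next := prev / 2 // the tag moved: check again sooner
    if digestUnchanged {
        next = prev * 2 // stable tag: back off
    }
    if next < lo {
        next = lo
    }
    if next > hi {
        next = hi
    }
    return next
}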
func (r *repoCacheManager) clientTimeoutError() error | {
return fmt.Errorf("client timeout (%s) exceeded", r.clientTimeout)
} | identifier_body |
|
repocachemanager.go | package cache
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/fluxcd/flux/pkg/image"
"github.com/fluxcd/flux/pkg/registry"
)
type imageToUpdate struct {
ref image.Ref
previousDigest string
previousRefresh time.Duration
}
// repoCacheManager handles cache operations for a container image repository
type repoCacheManager struct {
now time.Time
repoID image.Name
client registry.Client
clientTimeout time.Duration
burst int
trace bool
logger log.Logger
cacheClient Client
sync.Mutex
}
func newRepoCacheManager(now time.Time,
repoID image.Name, clientFactory registry.ClientFactory, creds registry.Credentials, repoClientTimeout time.Duration,
burst int, trace bool, logger log.Logger, cacheClient Client) (*repoCacheManager, error) {
client, err := clientFactory.ClientFor(repoID.CanonicalName(), creds)
if err != nil {
return nil, err
}
manager := &repoCacheManager{
now: now,
repoID: repoID,
client: client,
clientTimeout: repoClientTimeout,
burst: burst,
trace: trace,
logger: logger,
cacheClient: cacheClient,
}
return manager, nil
}
// fetchRepository fetches the repository from the cache
func (c *repoCacheManager) fetchRepository() (ImageRepository, error) {
var result ImageRepository
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, _, err := c.cacheClient.GetKey(repoKey)
if err != nil {
return ImageRepository{}, err
}
if err = json.Unmarshal(bytes, &result); err != nil {
return ImageRepository{}, err
}
return result, nil
}
// getTags gets the tags from the repository
func (c *repoCacheManager) getTags(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
tags, err := c.client.Tags(ctx)
if ctx.Err() == context.DeadlineExceeded {
return nil, c.clientTimeoutError()
}
return tags, err
}
// storeRepository stores the repository in the cache
func (c *repoCacheManager) storeRepository(repo ImageRepository) error {
repoKey := NewRepositoryKey(c.repoID.CanonicalName())
bytes, err := json.Marshal(repo)
if err != nil {
return err
}
return c.cacheClient.SetKey(repoKey, c.now.Add(repoRefresh), bytes)
}
// fetchImagesResult is the result of fetching images from the cache
// invariant: len(imagesToUpdate) == imagesToUpdateRefreshCount + imagesToUpdateMissingCount
type fetchImagesResult struct {
imagesFound map[string]image.Info // images found in the cache
imagesToUpdate []imageToUpdate // images which need to be updated
imagesToUpdateRefreshCount int // number of imagesToUpdate which need updating due to their cache entry expiring
imagesToUpdateMissingCount int // number of imagesToUpdate which need updating due to being missing
}
// fetchImages attempts to fetch the images with the provided tags from the cache.
// It returns the images found, those which require updating and details about
// why they need to be updated.
func (c *repoCacheManager) fetchImages(tags []string) (fetchImagesResult, error) {
images := map[string]image.Info{}
// Create a list of images that need updating
var toUpdate []imageToUpdate
// Counters for reporting what happened
var missing, refresh int
for _, tag := range tags {
if tag == "" {
return fetchImagesResult{}, fmt.Errorf("empty tag in fetched tags")
}
// See if we have the manifest already cached
newID := c.repoID.ToRef(tag)
key := NewManifestKey(newID.CanonicalRef())
bytes, deadline, err := c.cacheClient.GetKey(key)
// If err, then we don't have it yet. Update.
switch {
case err != nil: // by and large these are cache misses, but any error shall count as "not found"
if err != ErrNotCached {
c.logger.Log("warning", "error from cache", "err", err, "ref", newID)
}
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
case len(bytes) == 0:
c.logger.Log("warning", "empty result from cache", "ref", newID)
missing++
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: initialRefresh})
default:
var entry registry.ImageEntry
if err := json.Unmarshal(bytes, &entry); err == nil {
if c.trace {
c.logger.Log("trace", "found cached manifest", "ref", newID, "last_fetched", entry.LastFetched.Format(time.RFC3339), "deadline", deadline.Format(time.RFC3339))
}
if entry.ExcludedReason == "" {
images[tag] = entry.Info
if c.now.After(deadline) {
previousRefresh := minRefresh
lastFetched := entry.Info.LastFetched
if !lastFetched.IsZero() {
previousRefresh = deadline.Sub(lastFetched)
}
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: previousRefresh, previousDigest: entry.Info.Digest})
refresh++
}
} else {
if c.trace {
c.logger.Log("trace", "excluded in cache", "ref", newID, "reason", entry.ExcludedReason)
}
if c.now.After(deadline) {
toUpdate = append(toUpdate, imageToUpdate{ref: newID, previousRefresh: excludedRefresh})
refresh++
}
}
}
}
}
result := fetchImagesResult{
imagesFound: images,
imagesToUpdate: toUpdate,
imagesToUpdateRefreshCount: refresh,
imagesToUpdateMissingCount: missing,
}
return result, nil
}
// updateImages refreshes the cache entries for the images passed. It may not succeed for all images.
// It returns the values stored in cache, the number of images it succeeded for and the number
// of images whose manifest wasn't found in the registry.
func (c *repoCacheManager) updateImages(ctx context.Context, images []imageToUpdate) (map[string]image.Info, int, int) {
// The upper bound for concurrent fetches against a single host is
// w.Burst, so limit the number of fetching goroutines to that.
fetchers := make(chan struct{}, c.burst)
awaitFetchers := &sync.WaitGroup{}
ctxc, cancel := context.WithCancel(ctx)
defer cancel()
var successCount int
var manifestUnknownCount int
var result = map[string]image.Info{}
var warnAboutRateLimit sync.Once
updates:
for _, up := range images {
// copy the loop variable to avoid a data race when it is captured by the goroutine below
upCopy := up
select {
case <-ctxc.Done():
break updates
case fetchers <- struct{}{}:
}
awaitFetchers.Add(1)
go func() {
defer func() { awaitFetchers.Done(); <-fetchers }()
ctxcc, cancel := context.WithTimeout(ctxc, c.clientTimeout)
defer cancel()
entry, err := c.updateImage(ctxcc, upCopy)
if err != nil {
if err, ok := errors.Cause(err).(net.Error); (ok && err.Timeout()) || ctxcc.Err() == context.DeadlineExceeded {
// This was due to a context timeout, don't bother logging
return
}
switch {
case strings.Contains(err.Error(), "429"), strings.Contains(err.Error(), "toomanyrequests"):
// abort the image tags fetching if we've been rate limited
warnAboutRateLimit.Do(func() {
c.logger.Log("warn", "aborting image tag fetching due to rate limiting, will try again later")
cancel()
})
case strings.Contains(err.Error(), "manifest unknown"):
// Registry is corrupted, keep going, this manifest may not be relevant for automatic updates
c.Lock()
manifestUnknownCount++
c.Unlock()
c.logger.Log("warn", fmt.Sprintf("manifest for tag %s missing in repository %s", up.ref.Tag, up.ref.Name),
"impact", "flux will fail to auto-release workloads with matching images, ask the repository administrator to fix the inconsistency")
default:
c.logger.Log("err", err, "ref", up.ref)
}
return
}
c.Lock()
successCount++
if entry.ExcludedReason == "" {
result[upCopy.ref.Tag] = entry.Info
}
c.Unlock()
}()
}
awaitFetchers.Wait()
return result, successCount, manifestUnknownCount
}
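// Illustrative sketch (not part of the original file): the "warn once, then
// cancel" pattern used above when the registry rate-limits us. sync.Once keeps
// the warning to a single log line even if many goroutines hit HTTP 429 at the
// same time; the function name abortOnRateLimit is hypothetical.
func abortOnRateLimit(err error, once *sync.Once, logger log.Logger, cancel context.CancelFunc) bool {
    if err == nil || !strings.Contains(err.Error(), "429") {
        return false
    }
    once.Do(func() {
        logger.Log("warn", "rate limited by registry, aborting tag fetching")
        cancel() // stops the remaining fetch goroutines via the shared context
    })
    return true
}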
func (c *repoCacheManager) updateImage(ctx context.Context, update imageToUpdate) (registry.ImageEntry, error) {
imageID := update.ref
if c.trace {
c.logger.Log("trace", "refreshing manifest", "ref", imageID, "previous_refresh", update.previousRefresh.String())
}
ctx, cancel := context.WithTimeout(ctx, c.clientTimeout)
defer cancel()
// Get the image from the remote
entry, err := c.client.Manifest(ctx, imageID.Tag)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return registry.ImageEntry{}, c.clientTimeoutError()
}
if _, ok := err.(*image.LabelTimestampFormatError); !ok {
return registry.ImageEntry{}, err
}
c.logger.Log("err", err, "ref", imageID)
}
refresh := update.previousRefresh
reason := ""
switch {
case entry.ExcludedReason != "":
c.logger.Log("excluded", entry.ExcludedReason, "ref", imageID)
refresh = excludedRefresh
reason = "image is excluded"
case update.previousDigest == "":
entry.Info.LastFetched = c.now
refresh = update.previousRefresh
reason = "no prior cache entry for image"
case entry.Info.Digest == update.previousDigest:
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh * 2)
reason = "image digest is same"
default: // i.e., not excluded, but the digests differ -> the tag was moved
entry.Info.LastFetched = c.now
refresh = clipRefresh(refresh / 2)
reason = "image digest is different"
}
if c.trace {
c.logger.Log("trace", "caching manifest", "ref", imageID, "last_fetched", c.now.Format(time.RFC3339), "refresh", refresh.String(), "reason", reason)
}
key := NewManifestKey(imageID.CanonicalRef())
// Write back to memcached
val, err := json.Marshal(entry)
if err != nil {
return registry.ImageEntry{}, err
}
err = c.cacheClient.SetKey(key, c.now.Add(refresh), val)
if err != nil |
return entry, nil
}
func (r *repoCacheManager) clientTimeoutError() error {
return fmt.Errorf("client timeout (%s) exceeded", r.clientTimeout)
}
| {
return registry.ImageEntry{}, err
} | conditional_block |
sdss_sqldata.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 14:12:47 2021
@author: sonic
"""
#%%
import copy
import os, glob
import numpy as np
import pandas as pd
from astropy.io import ascii
import matplotlib.pyplot as plt
from ankepy.phot import anketool as atl # anke package
from astropy.cosmology import WMAP9 as cosmo
from astropy.table import Table, vstack, hstack
#%% working directories
# pathes
path_base = '/home/sonic/research/sed_modeling/'
path_csv = '/home/sonic/research/sed_modeling/csv/'
path_lib = '/home/sonic/research/sed_modeling/lib/'
path_input = '/home/sonic/research/sed_modeling/input/'
path_output = '/home/sonic/research/sed_modeling/output/'
path_result = '/home/sonic/research/sed_modeling/result/'
#%% sql data load
os.chdir(path_base)
os.chdir(path_csv)
# mode = 'spec'
mode = 'phot'
sdssdate = 'sdssdr12_{}'.format(mode)
sql = ascii.read('{}.csv'.format(sdssdate))
os.chdir(path_input)
try:
os.system('mkdir {}'.format(sdssdate))
except:
pass
os.chdir(path_csv)
if mode == 'spec':
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[1], 'z_spec')
sql.rename_column(sqlc[2], 'f_SDSS_u')
sql.rename_column(sqlc[3], 'e_SDSS_u')
sql.rename_column(sqlc[4], 'f_SDSS_g')
sql.rename_column(sqlc[5], 'e_SDSS_g')
sql.rename_column(sqlc[6], 'f_SDSS_r')
sql.rename_column(sqlc[7], 'e_SDSS_r')
sql.rename_column(sqlc[8], 'f_SDSS_i')
sql.rename_column(sqlc[9], 'e_SDSS_i')
sql.rename_column(sqlc[10], 'f_SDSS_z')
sql.rename_column(sqlc[11], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
else:
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[2-1], 'f_SDSS_u')
sql.rename_column(sqlc[3-1], 'e_SDSS_u')
sql.rename_column(sqlc[4-1], 'f_SDSS_g')
sql.rename_column(sqlc[5-1], 'e_SDSS_g')
sql.rename_column(sqlc[6-1], 'f_SDSS_r')
sql.rename_column(sqlc[7-1], 'e_SDSS_r')
sql.rename_column(sqlc[8-1], 'f_SDSS_i')
sql.rename_column(sqlc[9-1], 'e_SDSS_i')
sql.rename_column(sqlc[10-1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # write down manually
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
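# The atl.AB2Jy / atl.eAB2Jy calls above come from the author's ankepy package;
# the stand-ins below are an illustrative assumption using the standard AB zero
# point, f[Jy] = 10**(-0.4*(m_AB - 8.90)), shown only to document the conversion
# being applied (the real helpers may use different units):
def ab2jy_sketch(mab):
    return 10.0 ** (-0.4 * (mab - 8.90))                  # AB mag -> flux density [Jy]
def eab2jy_sketch(mab, emab):
    return 0.4 * np.log(10.0) * ab2jy_sketch(mab) * emab  # first-order error propagation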
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
|
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
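# The -25 term is the distance modulus written for d_L in Mpc:
# M = m - 5*log10(d_L / 10 pc) = m - 5*log10(d_L[Mpc]) - 25.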
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[(passive['z_spec'] > 0.03) & (passive['z_spec'] < 0.05) & (passive['rmag(ab)'] < -19)]
vlphot = photfout[(photfout['z_phot'] > 0.06) & (photfout['z_phot'] < 0.12) & (photfout['rmag(ab)'] < -17)]
#%% priority
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
#%%
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def convert(ax_Mr):
"""
Update the second axis according to the first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw()
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines(0.05, -19, -50, linestyle='--')
ax_Mr.set_xlim(0.0075, 0.20)
ax_Mr.set_ylim(-15, -24)
ax_Mr.legend(loc='lower right')
# ax_Mr.set_title('Spectroscopically Confirmed')
ax_Mr.set_ylabel('$M_r$')
ax_Mr.set_xlabel('Spectroscopic redshift')
ax_lM.tick_params(labelsize=12)
ax_Mr.tick_params(labelsize=12)
ax_lM.set_ylabel(r'log($M/M_{\odot}$)')
ax_P = axs[0]
s = ax_P.scatter(photzout[:37578]['z_m1'], absolmag(photin[:37578]['f_SDSS_r'], photzout[:37578]['z_m1']), c=photfout['lmass'], cmap='jet', alpha=0.3, vmin=7, vmax=12, label='Galaxies (Photometry)')
ax_P.hlines(-17.0, 0.06, 0.17, linestyle='--', label='Volume Limited Samples')
ax_P.vlines(0.06, -17.0, -50, linestyle='--')
ax_P.vlines(0.17, -17.0, -50, linestyle='--')
ax_P.set_xlim(0.015, 0.40)
ax_P.set_ylim(-15, -24)
# ax_P.set_title('Photometric Data Only')
caxes = fig.add_axes([0.891, 0.559, 0.01, 0.413])
fig.colorbar(s, ax=ax_P, label=r'log($M/M_{\odot}$)', orientation='vertical', cax=caxes)
ax_P.legend(loc='lower right')
ax_P.set_ylabel('$M_r$')
ax_P.set_xlabel('Photometric redshift')
ax_P.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig('{}hogwarts_0728/{}_VLsets.png'.format(path_result, sdssdate))  # savefig overwrites by default; it has no 'overwrite' keyword
plt.close()
#%%
#%%
fcat = ascii.read('sdssdr12_0502.cat')
zspec = fcat['spec_z'][0]
flxinfo = fcat
flx = []
flxerr = []
wl = []
ftr = []
# when only upper limit is available
flxupp = []
wlupp = []
ftrupp = []
for j in range(len(flxinfo.colnames)):
col = flxinfo.colnames[j]
try: nextcol = flxinfo.colnames[j+1]
except: pass
if flxinfo[col][0] == -99:
pass
elif col[0] == 'f':
if flxinfo[col][0] > 1e-50:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flx.append(atl.flux_f2w(flxinfo[col][0], wavelength))
flxerr.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wl.append(wavelength)
ftr.append(col[2:])
else:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flxupp.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wlupp.append(wavelength)
ftrupp.append(col[2:])
else:
pass
if target not in sus:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='dodgerblue', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='dodgerblue', s=40)
else:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='crimson', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='crimson', s=40)
try:
model = ascii.read('sdssdr12_0502_1.fit') # separate fitting
plt.plot(model['col1'], model['col2'], c='grey', alpha=0.7, zorder=0)
except:
print('There are no SED fitting result in the output directory.')
# plt.text(1.7e4, max(flx)*1.3, '{:11} (z={:.3f})'.format(target, zspec), fontsize=12)
# if i <= 15:
# plt.tick_params(axis='x', labelbottom=False)
plt.xlim(000, 50000)
plt.ylim(000, max(flx)*1.5)
#%%
os.chdir(path_output+'sdssdr12_spec')
a = ascii.read('sdssdr12_0502.fout')#, header_start=14)
os.chdir(path_csv)
b = ascii.read('sdssdr12_0502.csv')
c = ascii.read('sdssdr12_spec.zout')
#%%
plt.figure(figsize=(7,7))
plt.scatter(c['z_m1'], c['z_spec'], alpha = 0.1, c='dodgerblue')
plt.plot(np.arange(0,1,0.001), np.arange(0,1,0.001))
plt.xlim(0, 1.05)
plt.ylim(0, 1.05)
plt.title('phot-z test')
plt.xlabel('phot-z')
plt.ylabel('spec-z')
plt.grid(alpha=0.5)
plt.show()
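# A common summary statistic for the comparison above is the normalised median
# absolute deviation of dz = z_phot - z_spec (sketch; column names follow the
# tables already loaded, the 0.15 outlier threshold is a conventional choice):
dz = c['z_m1'] - c['z_spec']
sigma_nmad = 1.4826 * np.median(np.abs(dz - np.median(dz)) / (1 + c['z_spec']))
outlier_frac = np.mean(np.abs(dz) / (1 + c['z_spec']) > 0.15)
print('sigma_NMAD = %.4f, outlier fraction = %.3f' % (sigma_nmad, outlier_frac))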
#%% spliter
os.chdir(path_input+sdssdate)
sqlcat = ascii.read('{}.cat'.format(sdssdate))
sqlcat.rename_column('id', '#id')
sqlcat['#id'] = sqlcat['#id'].astype(str)
targets = sqlcat['#id']
core = 5
# dividing targets
for i in range(core):
s = len(sqlcat) * i // core      # integer arithmetic avoids float rounding at the chunk boundaries
e = len(sqlcat) * (i + 1) // core
sqlcat[s:e].write('multicore/{}_{}.cat'.format(sdssdate, i), format='ascii', delimiter='\t', overwrite=True)
with open('{}.param'.format(sdssdate), 'r') as f:
contents = f.readlines()
with open('multicore/{}_{}.param'.format(sdssdate, i), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(sdssdate, '{}_{}'.format(sdssdate, i)))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
# elif line.startswith('RESOLUTION'):
# p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
os.system('cp {}.translate multicore/{}_{}.translate'.format(sdssdate, sdssdate, i))
| p.write(line.replace(default, sdssdate)) | conditional_block |
sdss_sqldata.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 14:12:47 2021
@author: sonic
"""
#%%
import copy
import os, glob
import numpy as np
import pandas as pd
from astropy.io import ascii
import matplotlib.pyplot as plt
from ankepy.phot import anketool as atl # anke package
from astropy.cosmology import WMAP9 as cosmo
from astropy.table import Table, vstack, hstack
#%% working directories
# pathes
path_base = '/home/sonic/research/sed_modeling/'
path_csv = '/home/sonic/research/sed_modeling/csv/'
path_lib = '/home/sonic/research/sed_modeling/lib/'
path_input = '/home/sonic/research/sed_modeling/input/'
path_output = '/home/sonic/research/sed_modeling/output/'
path_result = '/home/sonic/research/sed_modeling/result/'
#%% sql data load
os.chdir(path_base)
os.chdir(path_csv)
# mode = 'spec'
mode = 'phot'
sdssdate = 'sdssdr12_{}'.format(mode)
sql = ascii.read('{}.csv'.format(sdssdate))
os.chdir(path_input)
try:
os.system('mkdir {}'.format(sdssdate))
except:
pass
os.chdir(path_csv)
if mode == 'spec':
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[1], 'z_spec')
sql.rename_column(sqlc[2], 'f_SDSS_u')
sql.rename_column(sqlc[3], 'e_SDSS_u')
sql.rename_column(sqlc[4], 'f_SDSS_g')
sql.rename_column(sqlc[5], 'e_SDSS_g')
sql.rename_column(sqlc[6], 'f_SDSS_r')
sql.rename_column(sqlc[7], 'e_SDSS_r')
sql.rename_column(sqlc[8], 'f_SDSS_i')
sql.rename_column(sqlc[9], 'e_SDSS_i')
sql.rename_column(sqlc[10], 'f_SDSS_z')
sql.rename_column(sqlc[11], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
else:
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[2-1], 'f_SDSS_u')
sql.rename_column(sqlc[3-1], 'e_SDSS_u')
sql.rename_column(sqlc[4-1], 'f_SDSS_g')
sql.rename_column(sqlc[5-1], 'e_SDSS_g')
sql.rename_column(sqlc[6-1], 'f_SDSS_r')
sql.rename_column(sqlc[7-1], 'e_SDSS_r')
sql.rename_column(sqlc[8-1], 'f_SDSS_i')
sql.rename_column(sqlc[9-1], 'e_SDSS_i')
sql.rename_column(sqlc[10-1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # write down manually
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(default, sdssdate))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[(passive['z_spec'] > 0.03) & (passive['z_spec'] < 0.05) & (passive['rmag(ab)'] < -19)]
vlphot = photfout[(photfout['z_phot'] > 0.06) & (photfout['z_phot'] < 0.12) & (photfout['rmag(ab)'] < -17)]
#%% priority
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
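# Worked example of the ranking above (made-up numbers, illustration only):
# a galaxy with lmass=10.8, lsfr=-0.5 and metal=0.02 would get
# prior = 0.701*10.8 + 0.356*(-0.5) + 0.411*0.02 - 1.968 ~ 5.43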
#%%
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def convert(ax_Mr):
|
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines(0.05, -19, -50, linestyle='--')
ax_Mr.set_xlim(0.0075, 0.20)
ax_Mr.set_ylim(-15, -24)
ax_Mr.legend(loc='lower right')
# ax_Mr.set_title('Spectroscopically Confirmed')
ax_Mr.set_ylabel('$M_r$')
ax_Mr.set_xlabel('Spectroscopic redshift')
ax_lM.tick_params(labelsize=12)
ax_Mr.tick_params(labelsize=12)
ax_lM.set_ylabel(r'log($M/M_{\odot}$)')
ax_P = axs[0]
s = ax_P.scatter(photzout[:37578]['z_m1'], absolmag(photin[:37578]['f_SDSS_r'], photzout[:37578]['z_m1']), c=photfout['lmass'], cmap='jet', alpha=0.3, vmin=7, vmax=12, label='Galaxies (Photometry)')
ax_P.hlines(-17.0, 0.06, 0.17, linestyle='--', label='Volume Limited Samples')
ax_P.vlines(0.06, -17.0, -50, linestyle='--')
ax_P.vlines(0.17, -17.0, -50, linestyle='--')
ax_P.set_xlim(0.015, 0.40)
ax_P.set_ylim(-15, -24)
# ax_P.set_title('Photometric Data Only')
caxes = fig.add_axes([0.891, 0.559, 0.01, 0.413])
fig.colorbar(s, ax=ax_P, label=r'log($M/M_{\odot}$)', orientation='vertical', cax=caxes)
ax_P.legend(loc='lower right')
ax_P.set_ylabel('$M_r$')
ax_P.set_xlabel('Photometric redshift')
ax_P.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig('{}hogwarts_0728/{}_VLsets.png'.format(path_result, sdssdate))
plt.close()
#%%
#%%
fcat = ascii.read('sdssdr12_0502.cat')
zspec = fcat['spec_z'][0]
flxinfo = fcat
flx = []
flxerr = []
wl = []
ftr = []
# when only upper limit is available
flxupp = []
wlupp = []
ftrupp = []
for j in range(len(flxinfo.colnames)):
col = flxinfo.colnames[j]
try: nextcol = flxinfo.colnames[j+1]
except: pass
if flxinfo[col][0] == -99:
pass
elif col[0] == 'f':
if flxinfo[col][0] > 1e-50:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flx.append(atl.flux_f2w(flxinfo[col][0], wavelength))
flxerr.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wl.append(wavelength)
ftr.append(col[2:])
else:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flxupp.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wlupp.append(wavelength)
ftrupp.append(col[2:])
else:
pass
if target not in sus:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='dodgerblue', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='dodgerblue', s=40)
else:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='crimson', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='crimson', s=40)
try:
model = ascii.read('sdssdr12_0502_1.fit') # separate fitting
plt.plot(model['col1'], model['col2'], c='grey', alpha=0.7, zorder=0)
except:
print('There is no SED fitting result in the output directory.')
# plt.text(1.7e4, max(flx)*1.3, '{:11} (z={:.3f})'.format(target, zspec), fontsize=12)
# if i <= 15:
# plt.tick_params(axis='x', labelbottom=False)
plt.xlim(0, 50000)
plt.ylim(0, max(flx)*1.5)
#%%
os.chdir(path_output+'sdssdr12_spec')
a = ascii.read('sdssdr12_0502.fout')#, header_start=14)
os.chdir(path_csv)
b = ascii.read('sdssdr12_0502.csv')
c = ascii.read('sdssdr12_spec.zout')
#%%
plt.figure(figsize=(7,7))
plt.scatter(c['z_m1'], c['z_spec'], alpha = 0.1, c='dodgerblue')
plt.plot(np.arange(0,1,0.001), np.arange(0,1,0.001))
plt.xlim(0, 1.05)
plt.ylim(0, 1.05)
plt.title('phot-z test')
plt.xlabel('phot-z')
plt.ylabel('spec-z')
plt.grid(alpha=0.5)
plt.show()
#%% splitter
os.chdir(path_input+sdssdate)
sqlcat = ascii.read('{}.cat'.format(sdssdate))
sqlcat.rename_column('id', '#id')
sqlcat['#id'] = sqlcat['#id'].astype(str)
targets = sqlcat['#id']
core = 5
# dividing targets
for i in range(core):
s = int(len(sqlcat)/core * i)
e = int(len(sqlcat)/core * (i+1))
sqlcat[s:e].write('multicore/{}_{}.cat'.format(sdssdate, i), format='ascii', delimiter='\t', overwrite=True)
with open('{}.param'.format(sdssdate), 'r') as f:
contents = f.readlines()
with open('multicore/{}_{}.param'.format(sdssdate, i), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(sdssdate, '{}_{}'.format(sdssdate, i)))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
# elif line.startswith('RESOLUTION'):
# p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
os.system('cp {}.translate multicore/{}_{}.translate'.format(sdssdate, sdssdate, i))
| """
Update the second axis according to the first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw() | identifier_body |
sdss_sqldata.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 14:12:47 2021
@author: sonic
"""
#%%
import copy
import os, glob
import numpy as np
import pandas as pd
from astropy.io import ascii
import matplotlib.pyplot as plt
from ankepy.phot import anketool as atl # anke package
from astropy.cosmology import WMAP9 as cosmo
from astropy.table import Table, vstack, hstack
#%% working directories
# paths
path_base = '/home/sonic/research/sed_modeling/'
path_csv = '/home/sonic/research/sed_modeling/csv/'
path_lib = '/home/sonic/research/sed_modeling/lib/'
path_input = '/home/sonic/research/sed_modeling/input/'
path_output = '/home/sonic/research/sed_modeling/output/'
path_result = '/home/sonic/research/sed_modeling/result/'
#%% sql data load
os.chdir(path_base)
os.chdir(path_csv)
# mode = 'spec'
mode = 'phot'
sdssdate = 'sdssdr12_{}'.format(mode)
sql = ascii.read('{}.csv'.format(sdssdate))
os.chdir(path_input)
try:
os.system('mkdir {}'.format(sdssdate))
except:
pass
os.chdir(path_csv)
if mode == 'spec':
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[1], 'z_spec')
sql.rename_column(sqlc[2], 'f_SDSS_u')
sql.rename_column(sqlc[3], 'e_SDSS_u')
sql.rename_column(sqlc[4], 'f_SDSS_g')
sql.rename_column(sqlc[5], 'e_SDSS_g')
sql.rename_column(sqlc[6], 'f_SDSS_r')
sql.rename_column(sqlc[7], 'e_SDSS_r')
sql.rename_column(sqlc[8], 'f_SDSS_i')
sql.rename_column(sqlc[9], 'e_SDSS_i')
sql.rename_column(sqlc[10], 'f_SDSS_z')
sql.rename_column(sqlc[11], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
else:
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[2-1], 'f_SDSS_u')
sql.rename_column(sqlc[3-1], 'e_SDSS_u')
sql.rename_column(sqlc[4-1], 'f_SDSS_g')
sql.rename_column(sqlc[5-1], 'e_SDSS_g')
sql.rename_column(sqlc[6-1], 'f_SDSS_r')
sql.rename_column(sqlc[7-1], 'e_SDSS_r')
sql.rename_column(sqlc[8-1], 'f_SDSS_i')
sql.rename_column(sqlc[9-1], 'e_SDSS_i')
sql.rename_column(sqlc[10-1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # filter-name translation table, prepared by hand
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(default, sdssdate))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
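# absolute magnitude from apparent magnitude and redshift via the distance modulus,
# using the WMAP9 luminosity distance in Mpc: M = m - 5*log10(d_L/Mpc) - 25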
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
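# passive-galaxy cut: keep objects whose specific SFR (converted from yr^-1 to Gyr^-1)
# lies below 1/(3 * cosmic age at the galaxy's redshift)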
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[passive['z_spec']>0.03]
vlspec = vlspec[vlspec['z_spec']<0.05]
vlspec = vlspec[vlspec['rmag(ab)']<-19]
vlphot = photfout[photfout['z_phot']>0.06]
vlphot = vlphot[vlphot['z_phot']<0.12]
vlphot = vlphot[vlphot['rmag(ab)']<-17]
#%% priority
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
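# follow-up priority score: linear combination of log stellar mass, log SFR and metallicity
# with the g1-g4 coefficients above (Artale+19 BNS relation at z=0.1, as noted)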
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
#%%
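# rough stellar-mass proxy: convert absolute r-band magnitude to log(M*/Msun)
# assuming a single fixed g-r colour of 0.88 for every galaxy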
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def convert(ax_Mr):
"""
Update the second axis according to the first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw()
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines(0.05, -19, -50, linestyle='--') | ax_Mr.set_xlabel('Spectroscopic redshift')
ax_lM.tick_params(labelsize=12)
ax_Mr.tick_params(labelsize=12)
ax_lM.set_ylabel(r'log($M/M_{\odot}$)')
ax_P = axs[0]
s = ax_P.scatter(photzout[:37578]['z_m1'], absolmag(photin[:37578]['f_SDSS_r'], photzout[:37578]['z_m1']), c=photfout['lmass'], cmap='jet', alpha=0.3, vmin=7, vmax=12, label='Galaxies (Photometry)')
ax_P.hlines(-17.0, 0.06, 0.17, linestyle='--', label='Volume Limited Samples')
ax_P.vlines(0.06, -17.0, -50, linestyle='--')
ax_P.vlines(0.17, -17.0, -50, linestyle='--')
ax_P.set_xlim(0.015, 0.40)
ax_P.set_ylim(-15, -24)
# ax_P.set_title('Photometric Data Only')
caxes = fig.add_axes([0.891, 0.559, 0.01, 0.413])
fig.colorbar(s, ax=ax_P, label=r'log($M/M_{\odot}$)', orientation='vertical', cax=caxes)
ax_P.legend(loc='lower right')
ax_P.set_ylabel('$M_r$')
ax_P.set_xlabel('Photometric redshift')
ax_P.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig('{}hogwarts_0728/{}_VLsets.png'.format(path_result, sdssdate))
plt.close()
#%%
#%%
fcat = ascii.read('sdssdr12_0502.cat')
zspec = fcat['spec_z'][0]
flxinfo = fcat
flx = []
flxerr = []
wl = []
ftr = []
# when only upper limit is available
flxupp = []
wlupp = []
ftrupp = []
for j in range(len(flxinfo.colnames)):
col = flxinfo.colnames[j]
try: nextcol = flxinfo.colnames[j+1]
except: pass
if flxinfo[col][0] == -99:
pass
elif col[0] == 'f':
if flxinfo[col][0] > 1e-50:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flx.append(atl.flux_f2w(flxinfo[col][0], wavelength))
flxerr.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wl.append(wavelength)
ftr.append(col[2:])
else:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flxupp.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wlupp.append(wavelength)
ftrupp.append(col[2:])
else:
pass
if target not in sus:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='dodgerblue', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='dodgerblue', s=40)
else:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='crimson', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='crimson', s=40)
try:
model = ascii.read('sdssdr12_0502_1.fit') # separate fitting
plt.plot(model['col1'], model['col2'], c='grey', alpha=0.7, zorder=0)
except:
print('There is no SED fitting result in the output directory.')
# plt.text(1.7e4, max(flx)*1.3, '{:11} (z={:.3f})'.format(target, zspec), fontsize=12)
# if i <= 15:
# plt.tick_params(axis='x', labelbottom=False)
plt.xlim(0, 50000)
plt.ylim(0, max(flx)*1.5)
#%%
os.chdir(path_output+'sdssdr12_spec')
a = ascii.read('sdssdr12_0502.fout')#, header_start=14)
os.chdir(path_csv)
b = ascii.read('sdssdr12_0502.csv')
c = ascii.read('sdssdr12_spec.zout')
#%%
plt.figure(figsize=(7,7))
plt.scatter(c['z_m1'], c['z_spec'], alpha = 0.1, c='dodgerblue')
plt.plot(np.arange(0,1,0.001), np.arange(0,1,0.001))
plt.xlim(0, 1.05)
plt.ylim(0, 1.05)
plt.title('phot-z test')
plt.xlabel('phot-z')
plt.ylabel('spec-z')
plt.grid(alpha=0.5)
plt.show()
#%% splitter
os.chdir(path_input+sdssdate)
sqlcat = ascii.read('{}.cat'.format(sdssdate))
sqlcat.rename_column('id', '#id')
sqlcat['#id'] = sqlcat['#id'].astype(str)
targets = sqlcat['#id']
core = 5
# dividing targets
for i in range(core):
s = int(len(sqlcat)/core * i)
e = int(len(sqlcat)/core * (i+1))
sqlcat[s:e].write('multicore/{}_{}.cat'.format(sdssdate, i), format='ascii', delimiter='\t', overwrite=True)
with open('{}.param'.format(sdssdate), 'r') as f:
contents = f.readlines()
with open('multicore/{}_{}.param'.format(sdssdate, i), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(sdssdate, '{}_{}'.format(sdssdate, i)))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
# elif line.startswith('RESOLUTION'):
# p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
os.system('cp {}.translate multicore/{}_{}.translate'.format(sdssdate, sdssdate, i)) | ax_Mr.set_xlim(0.0075, 0.20)
ax_Mr.set_ylim(-15, -24)
ax_Mr.legend(loc='lower right')
# ax_Mr.set_title('Spectroscopically Confirmed')
ax_Mr.set_ylabel('$M_r$') | random_line_split |
sdss_sqldata.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 14:12:47 2021
@author: sonic
"""
#%%
import copy
import os, glob
import numpy as np
import pandas as pd
from astropy.io import ascii
import matplotlib.pyplot as plt
from ankepy.phot import anketool as atl # anke package
from astropy.cosmology import WMAP9 as cosmo
from astropy.table import Table, vstack, hstack
#%% working directories
# paths
path_base = '/home/sonic/research/sed_modeling/'
path_csv = '/home/sonic/research/sed_modeling/csv/'
path_lib = '/home/sonic/research/sed_modeling/lib/'
path_input = '/home/sonic/research/sed_modeling/input/'
path_output = '/home/sonic/research/sed_modeling/output/'
path_result = '/home/sonic/research/sed_modeling/result/'
#%% sql data load
os.chdir(path_base)
os.chdir(path_csv)
# mode = 'spec'
mode = 'phot'
sdssdate = 'sdssdr12_{}'.format(mode)
sql = ascii.read('{}.csv'.format(sdssdate))
os.chdir(path_input)
try:
os.system('mkdir {}'.format(sdssdate))
except:
pass
os.chdir(path_csv)
if mode == 'spec':
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[1], 'z_spec')
sql.rename_column(sqlc[2], 'f_SDSS_u')
sql.rename_column(sqlc[3], 'e_SDSS_u')
sql.rename_column(sqlc[4], 'f_SDSS_g')
sql.rename_column(sqlc[5], 'e_SDSS_g')
sql.rename_column(sqlc[6], 'f_SDSS_r')
sql.rename_column(sqlc[7], 'e_SDSS_r')
sql.rename_column(sqlc[8], 'f_SDSS_i')
sql.rename_column(sqlc[9], 'e_SDSS_i')
sql.rename_column(sqlc[10], 'f_SDSS_z')
sql.rename_column(sqlc[11], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
else:
sqlc = sql.colnames
sql.rename_column(sqlc[0], '#id')
sql.rename_column(sqlc[2-1], 'f_SDSS_u')
sql.rename_column(sqlc[3-1], 'e_SDSS_u')
sql.rename_column(sqlc[4-1], 'f_SDSS_g')
sql.rename_column(sqlc[5-1], 'e_SDSS_g')
sql.rename_column(sqlc[6-1], 'f_SDSS_r')
sql.rename_column(sqlc[7-1], 'e_SDSS_r')
sql.rename_column(sqlc[8-1], 'f_SDSS_i')
sql.rename_column(sqlc[9-1], 'e_SDSS_i')
sql.rename_column(sqlc[10-1], 'f_SDSS_z')
sql.rename_column(sqlc[11-1], 'e_SDSS_z')
sql['#id'] = sql['#id'].astype('str')
targets = sql['#id']
sqlc = sql.colnames
#%% FAST input
os.chdir(path_lib)
default = 'hdfn_fs99'
ftrinfo = open('FILTER.RES.latest.info', 'r').readlines() # https://github.com/gbrammer/eazy-photoz
translate = ascii.read('translate.cat') # filter-name translation table, prepared by hand
filters = [f for f in sqlc if f.startswith('f_')]
ftrtbl = Table()
for f in filters:
if f not in translate['filter']:
print("Warning: Filter name '{}' is not defined in your translate file.".format(f))
else:
linenum = int(translate[translate['filter']==f]['lines'][0][1:])
lambda_c = float(ftrinfo[linenum-1].split('lambda_c= ')[-1].split(' ')[0])
dummy = Table([[f], [lambda_c]], names=['filter', 'lambda_c'])
ftrtbl = vstack([ftrtbl, dummy])
# catalog file
os.chdir('{}/{}'.format(path_input, sdssdate))
sqlcat = copy.deepcopy(sql)
if mode == 'spec':
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j==1:
sqlcat[i][j] = sql[i][j]
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
else:
for i in range(len(sql)):
for j in range(len(sqlc)):
if j == 0:
sqlcat[i][j] = str(i+1).zfill(5)
elif j%2 == 1:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.AB2Jy(sql[i][j]), 5)
elif j%2 == 0:
if sql[i][j] == -9999:
sqlcat[i][j] = -99
else:
sqlcat[i][j] = atl.rsig(atl.eAB2Jy(sql[i][j-1], sql[i][j]), 5)
sqlcat.write('{}.cat'.format(sdssdate), format='ascii', delimiter='\t', overwrite=True)
# parameter file
with open('{}/{}.param'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.param'.format(sdssdate), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(default, sdssdate))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
elif line.startswith('RESOLUTION'):
p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
# translate file
with open('{}/{}.translate'.format(path_lib, default), 'r') as f:
contents = f.readlines()
with open('{}.translate'.format(sdssdate), 'w') as t:
for line in contents:
t.write(line)
f.close()
t.close()
#%% FAST output
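# absolute magnitude from apparent magnitude and redshift via the distance modulus,
# using the WMAP9 luminosity distance in Mpc: M = m - 5*log10(d_L/Mpc) - 25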
def absolmag(m, z):
dist = np.array(cosmo.luminosity_distance(z))
return m - 5*np.log10(dist) - 25
# input data = sql
os.chdir(path_output+sdssdate)
if mode == 'spec':
specin = copy.deepcopy(sql)
specfout = ascii.read('{}.fout'.format(sdssdate), header_start=16)
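# passive-galaxy cut: keep objects whose specific SFR (converted from yr^-1 to Gyr^-1)
# lies below 1/(3 * cosmic age at the galaxy's redshift)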
passive = specfout[10**specfout['lssfr']*1e9 < np.array(1/(3*cosmo.age(specfout['z'])))]
passive_r = []
passive_z = []
for i in range(len(passive)):
j = passive['id'][i]
passive_r.append(specin['f_SDSS_r'][j-1])
passive_z.append(specin['z_spec'][j-1])
passive_R = absolmag(passive_r, passive_z)
passive['rmag'] = passive_r
passive['z_spec'] = passive_z
passive['rmag(ab)'] = passive_R
else:
photin = copy.deepcopy(sql)
photzout = ascii.read('{}.zout'.format(sdssdate))
photfout = ascii.read('{}.fout'.format(sdssdate), header_start=17)
phot_R = absolmag(photin['f_SDSS_r'], photzout['z_m1'])
photfout['z_phot'] = photzout['z_m1']
# photfout['z_phot'] = photzout[:37578]['z_m1']
photfout['rmag(ab)']= phot_R#[:37578]
#%% volume limited sample
vlspec = passive[passive['z_spec']>0.03]
vlspec = vlspec[vlspec['z_spec']<0.05]
vlspec = vlspec[vlspec['rmag(ab)']<-19]
vlphot = photfout[photfout['z_phot']>0.06]
vlphot = vlphot[vlphot['z_phot']<0.12]
vlphot = vlphot[vlphot['rmag(ab)']<-17]
#%% priority
g1 = 0.701
g2 = 0.356
g3 = 0.411
g4 = -1.968 # Artale+19, BNS, z=0.1
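# follow-up priority score: linear combination of log stellar mass, log SFR and metallicity
# with the g1-g4 coefficients above (Artale+19 BNS relation at z=0.1, as noted)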
vlspec['prior'] = g1*vlspec['lmass']+g2*vlspec['lsfr']+g3*vlspec['metal']+g4
vlphot['prior'] = g1*vlphot['lmass']+g2*vlphot['lsfr']+g3*vlphot['metal']+g4
vlphot.sort('prior')
vlspec.sort('prior')
#%%
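# rough stellar-mass proxy: convert absolute r-band magnitude to log(M*/Msun)
# assuming a single fixed g-r colour of 0.88 for every galaxy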
def Mr2logM(Mr):
gr = 0.88
return -0.39*Mr+1.05*gr+1.60
# Define a closure function to register as a callback
def | (ax_Mr):
"""
Update the second axis according to the first axis.
"""
y1, y2 = ax_Mr.get_ylim()
ax_lM.set_ylim(Mr2logM(y1), Mr2logM(y2))
ax_lM.figure.canvas.draw()
# plt.rcParams.update({'font.size': 14})
fig, axs = plt.subplots(2, 1, figsize=(9,12))
plt.rcParams.update({'font.size': 18})
ax_Mr = axs[1]
ax_lM = ax_Mr.twinx()
# ax_Mr.set_title('SDSS DR12 Galaxies Distribution')
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_Mr.callbacks.connect("ylim_changed", convert)
ax_Mr.scatter(specin['z_spec'], absolmag(specin['f_SDSS_r'], specin['z_spec']), c='grey', alpha=0.2, label='Galaxies (Spectroscopy)')# (n={})'.format(len(specin)))
ax_Mr.scatter(passive_z, absolmag(passive_r, passive_z), c='crimson', alpha=0.2, label='Passive Galaxies')# (n={})'.format(len(passive)))
# ax_Mr.hlines(-21, 0.07, 0.13, linestyle='--')#, label='Volume Limited Samples')
# ax_Mr.vlines(0.07, -21, -50, linestyle='--')
# ax_Mr.vlines(0.13, -21, -50, linestyle='--')
ax_Mr.hlines(-19, 0.03, 0.05, linestyle='--', label='Volume Limited Samples')
ax_Mr.vlines(0.03, -19, -50, linestyle='--')
ax_Mr.vlines(0.05, -19, -50, linestyle='--')
ax_Mr.set_xlim(0.0075, 0.20)
ax_Mr.set_ylim(-15, -24)
ax_Mr.legend(loc='lower right')
# ax_Mr.set_title('Spectroscopically Confirmed')
ax_Mr.set_ylabel('$M_r$')
ax_Mr.set_xlabel('Spectroscopic redshift')
ax_lM.tick_params(labelsize=12)
ax_Mr.tick_params(labelsize=12)
ax_lM.set_ylabel(r'log($M/M_{\odot}$)')
ax_P = axs[0]
s = ax_P.scatter(photzout[:37578]['z_m1'], absolmag(photin[:37578]['f_SDSS_r'], photzout[:37578]['z_m1']), c=photfout['lmass'], cmap='jet', alpha=0.3, vmin=7, vmax=12, label='Galaxies (Photometry)')
ax_P.hlines(-17.0, 0.06, 0.17, linestyle='--', label='Volume Limited Samples')
ax_P.vlines(0.06, -17.0, -50, linestyle='--')
ax_P.vlines(0.17, -17.0, -50, linestyle='--')
ax_P.set_xlim(0.015, 0.40)
ax_P.set_ylim(-15, -24)
# ax_P.set_title('Photometric Data Only')
caxes = fig.add_axes([0.891, 0.559, 0.01, 0.413])
fig.colorbar(s, ax=ax_P, label=r'log($M/M_{\odot}$)', orientation='vertical', cax=caxes)
ax_P.legend(loc='lower right')
ax_P.set_ylabel('$M_r$')
ax_P.set_xlabel('Photometric redshift')
ax_P.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig('{}hogwarts_0728/{}_VLsets.png'.format(path_result, sdssdate))
plt.close()
#%%
#%%
fcat = ascii.read('sdssdr12_0502.cat')
zspec = fcat['spec_z'][0]
flxinfo = fcat
flx = []
flxerr = []
wl = []
ftr = []
# when only upper limit is available
flxupp = []
wlupp = []
ftrupp = []
for j in range(len(flxinfo.colnames)):
col = flxinfo.colnames[j]
try: nextcol = flxinfo.colnames[j+1]
except: pass
if flxinfo[col][0] == -99:
pass
elif col[0] == 'f':
if flxinfo[col][0] > 1e-50:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flx.append(atl.flux_f2w(flxinfo[col][0], wavelength))
flxerr.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wl.append(wavelength)
ftr.append(col[2:])
else:
wavelength = ftrtbl[ftrtbl['filter']==col]['lambda_c'][0]
flxupp.append(atl.flux_f2w(flxinfo[nextcol][0], wavelength))
wlupp.append(wavelength)
ftrupp.append(col[2:])
else:
pass
if target not in sus:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='dodgerblue', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='dodgerblue', s=40)
else:
plt.errorbar(wl, flx, yerr=flxerr, ms=3, marker='s', ls='', c='crimson', capsize=2, capthick=1)
plt.scatter(wlupp, flxupp, marker='v', c='crimson', s=40)
try:
model = ascii.read('sdssdr12_0502_1.fit') # separate fitting
plt.plot(model['col1'], model['col2'], c='grey', alpha=0.7, zorder=0)
except:
print('There is no SED fitting result in the output directory.')
# plt.text(1.7e4, max(flx)*1.3, '{:11} (z={:.3f})'.format(target, zspec), fontsize=12)
# if i <= 15:
# plt.tick_params(axis='x', labelbottom=False)
plt.xlim(0, 50000)
plt.ylim(0, max(flx)*1.5)
#%%
os.chdir(path_output+'sdssdr12_spec')
a = ascii.read('sdssdr12_0502.fout')#, header_start=14)
os.chdir(path_csv)
b = ascii.read('sdssdr12_0502.csv')
c = ascii.read('sdssdr12_spec.zout')
#%%
plt.figure(figsize=(7,7))
plt.scatter(c['z_m1'], c['z_spec'], alpha = 0.1, c='dodgerblue')
plt.plot(np.arange(0,1,0.001), np.arange(0,1,0.001))
plt.xlim(0, 1.05)
plt.ylim(0, 1.05)
plt.title('phot-z test')
plt.xlabel('phot-z')
plt.ylabel('spec-z')
plt.grid(alpha=0.5)
plt.show()
#%% splitter
os.chdir(path_input+sdssdate)
sqlcat = ascii.read('{}.cat'.format(sdssdate))
sqlcat.rename_column('id', '#id')
sqlcat['#id'] = sqlcat['#id'].astype(str)
targets = sqlcat['#id']
core = 5
# dividing targets
for i in range(core):
s = int(len(sqlcat)/core * i)
e = int(len(sqlcat)/core * (i+1))
sqlcat[s:e].write('multicore/{}_{}.cat'.format(sdssdate, i), format='ascii', delimiter='\t', overwrite=True)
with open('{}.param'.format(sdssdate), 'r') as f:
contents = f.readlines()
with open('multicore/{}_{}.param'.format(sdssdate, i), 'w') as p:
for line in contents:
if line.startswith('CATALOG'):
p.write(line.replace(sdssdate, '{}_{}'.format(sdssdate, i)))
# elif line.startswith('N_SIM'):
# p.write(line.replace('0', '100'))
# elif line.startswith('RESOLUTION'):
# p.write(line.replace("= 'hr'", "= 'lr'"))
# elif line.startswith('NO_MAX_AGE'):
# p.write(line.replace('= 0', '= 1'))
else:
p.write(line)
f.close()
p.close()
os.system('cp {}.translate multicore/{}_{}.translate'.format(sdssdate, sdssdate, i))
| convert | identifier_name |
instance.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecloud
import (
"context"
"fmt"
"time"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/util/sets"
billing_api "yunion.io/x/onecloud/pkg/apis/billing"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
"yunion.io/x/onecloud/pkg/util/billing"
)
type SNovaRequest struct {
SApiRequest
}
func NewNovaRequest(ar *SApiRequest) *SNovaRequest {
return &SNovaRequest{
SApiRequest: *ar,
}
}
func (nr *SNovaRequest) GetPort() string {
if nr.RegionId == "guangzhou-2" {
return ""
}
return nr.SApiRequest.GetPort()
}
type SInstance struct {
multicloud.SInstanceBase
multicloud.EcloudTags
multicloud.SBillingBase
SZoneRegionBase
SCreateTime
nicComplete bool
host *SHost
image *SImage
sysDisk cloudprovider.ICloudDisk
dataDisks []cloudprovider.ICloudDisk
Id string
Name string
Vcpu int
Vmemory int
KeyName string
ImageRef string
ImageName string
ImageOsType string
FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
func (i *SInstance) GetStatus() string {
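// map the ecloud-side EcStatus value onto the corresponding onecloud VM status constant;
// anything unrecognized is reported as VM_UNKNOWN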
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) Renew(bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error |
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request := NewNovaRequest(NewApiRequest(in.host.zone.region.ID, "/api/v2/volume/volume/mount/list",
map[string]string{"serverId": in.Id}, nil))
disks := make([]SDisk, 0, 5)
err := in.host.zone.region.client.doList(context.Background(), request, &disks)
if err != nil {
return err
}
idisks := make([]cloudprovider.ICloudDisk, len(disks))
for i := range idisks {
storageType := disks[i].Type
storage, err := in.host.zone.getStorageByType(storageType)
if err != nil {
return errors.Wrapf(err, "unable to fetch storage with stoageType %s", storageType)
}
disks[i].storage = storage
idisks[i] = &disks[i]
}
in.dataDisks = idisks
return nil
}
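// makeNicComplete enriches PortDetail: for every router (VPC) referenced by the instance ports
// it queries the console nic endpoint and copies the returned SInstanceNicDetail into the
// matching port entry.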
func (in *SInstance) makeNicComplete() error {
routerIds := sets.NewString()
nics := make(map[string]*SInstanceNic, len(in.PortDetail))
for i := range in.PortDetail {
nic := &in.PortDetail[i]
routerIds.Insert(nic.RouterId)
nics[nic.PortId] = nic
}
for _, routerId := range routerIds.UnsortedList() {
request := NewConsoleRequest(in.host.zone.region.ID, fmt.Sprintf("/api/vpc/%s/nic", routerId),
map[string]string{
"resourceId": in.Id,
}, nil,
)
completeNics := make([]SInstanceNic, 0, len(nics)/2)
err := in.host.zone.region.client.doList(context.Background(), request, &completeNics)
if err != nil {
return errors.Wrapf(err, "unable to get nics with instance %s in vpc %s", in.Id, routerId)
}
for i := range completeNics {
id := completeNics[i].Id
nic, ok := nics[id]
if !ok {
continue
}
nic.SInstanceNicDetail = completeNics[i].SInstanceNicDetail
}
}
return nil
}
func (r *SRegion) findHost(zoneRegion string) (*SHost, error) {
zone, err := r.FindZone(zoneRegion)
if err != nil {
return nil, err
}
return &SHost{
zone: zone,
}, nil
}
func (r *SRegion) GetInstancesWithHost(zoneRegion string) ([]SInstance, error) {
instances, err := r.GetInstances(zoneRegion)
if err != nil {
return nil, err
}
for i := range instances {
host, _ := r.findHost(instances[i].Region)
instances[i].host = host
}
return instances, nil
}
func (r *SRegion) GetInstances(zoneRegion string) ([]SInstance, error) {
return r.getInstances(zoneRegion, "")
}
func (r *SRegion) getInstances(zoneRegion string, serverId string) ([]SInstance, error) {
query := map[string]string{
"serverTypes": "VM",
"productTypes": "NORMAL,AUTOSCALING,VO,CDN,PAAS_MASTER,PAAS_SLAVE,VCPE,EMR,LOGAUDIT",
//"productTypes": "NORMAL",
"visible": "true",
}
if len(serverId) > 0 {
query["serverId"] = serverId
}
if len(zoneRegion) > 0 {
query["region"] = zoneRegion
}
request := NewNovaRequest(NewApiRequest(r.ID, "/api/v2/server/web/with/network", query, nil))
var instances []SInstance
err := r.client.doList(context.Background(), request, &instances)
if err != nil {
return nil, err
}
return instances, nil
}
func (r *SRegion) GetInstanceById(id string) (*SInstance, error) {
instances, err := r.getInstances("", id)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, cloudprovider.ErrNotFound
}
instance := &instances[0]
host, err := r.findHost(instance.Region)
if err == nil {
instance.host = host
}
return instance, nil
}
func (r *SRegion) GetInstanceVNCUrl(instanceId string) (string, error) {
request := NewNovaRequest(NewApiRequest(r.ID, fmt.Sprintf("/api/server/%s/vnc", instanceId), nil, nil))
var url string
err := r.client.doGet(context.Background(), request, &url)
if err != nil {
return "", err
}
return url, nil
}
| {
return nil
} | identifier_body |
instance.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecloud
import (
"context"
"fmt"
"time"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/util/sets"
billing_api "yunion.io/x/onecloud/pkg/apis/billing"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
"yunion.io/x/onecloud/pkg/util/billing"
)
type SNovaRequest struct {
SApiRequest
}
func NewNovaRequest(ar *SApiRequest) *SNovaRequest {
return &SNovaRequest{
SApiRequest: *ar,
}
}
func (nr *SNovaRequest) GetPort() string {
if nr.RegionId == "guangzhou-2" {
return ""
}
return nr.SApiRequest.GetPort()
}
type SInstance struct {
multicloud.SInstanceBase
multicloud.EcloudTags
multicloud.SBillingBase
SZoneRegionBase
SCreateTime
nicComplete bool
host *SHost
image *SImage
sysDisk cloudprovider.ICloudDisk
dataDisks []cloudprovider.ICloudDisk
Id string
Name string
Vcpu int
Vmemory int
KeyName string
ImageRef string
ImageName string
ImageOsType string
FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
func (i *SInstance) GetStatus() string {
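// map the ecloud-side EcStatus value onto the corresponding onecloud VM status constant;
// anything unrecognized is reported as VM_UNKNOWN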
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) | (bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error {
return nil
}
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request := NewNovaRequest(NewApiRequest(in.host.zone.region.ID, "/api/v2/volume/volume/mount/list",
map[string]string{"serverId": in.Id}, nil))
disks := make([]SDisk, 0, 5)
err := in.host.zone.region.client.doList(context.Background(), request, &disks)
if err != nil {
return err
}
idisks := make([]cloudprovider.ICloudDisk, len(disks))
for i := range idisks {
storageType := disks[i].Type
storage, err := in.host.zone.getStorageByType(storageType)
if err != nil {
return errors.Wrapf(err, "unable to fetch storage with stoageType %s", storageType)
}
disks[i].storage = storage
idisks[i] = &disks[i]
}
in.dataDisks = idisks
return nil
}
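// makeNicComplete enriches PortDetail: for every router (VPC) referenced by the instance ports
// it queries the console nic endpoint and copies the returned SInstanceNicDetail into the
// matching port entry.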
func (in *SInstance) makeNicComplete() error {
routerIds := sets.NewString()
nics := make(map[string]*SInstanceNic, len(in.PortDetail))
for i := range in.PortDetail {
nic := &in.PortDetail[i]
routerIds.Insert(nic.RouterId)
nics[nic.PortId] = nic
}
for _, routerId := range routerIds.UnsortedList() {
request := NewConsoleRequest(in.host.zone.region.ID, fmt.Sprintf("/api/vpc/%s/nic", routerId),
map[string]string{
"resourceId": in.Id,
}, nil,
)
completeNics := make([]SInstanceNic, 0, len(nics)/2)
err := in.host.zone.region.client.doList(context.Background(), request, &completeNics)
if err != nil {
return errors.Wrapf(err, "unable to get nics with instance %s in vpc %s", in.Id, routerId)
}
for i := range completeNics {
id := completeNics[i].Id
nic, ok := nics[id]
if !ok {
continue
}
nic.SInstanceNicDetail = completeNics[i].SInstanceNicDetail
}
}
return nil
}
func (r *SRegion) findHost(zoneRegion string) (*SHost, error) {
zone, err := r.FindZone(zoneRegion)
if err != nil {
return nil, err
}
return &SHost{
zone: zone,
}, nil
}
func (r *SRegion) GetInstancesWithHost(zoneRegion string) ([]SInstance, error) {
instances, err := r.GetInstances(zoneRegion)
if err != nil {
return nil, err
}
for i := range instances {
host, _ := r.findHost(instances[i].Region)
instances[i].host = host
}
return instances, nil
}
func (r *SRegion) GetInstances(zoneRegion string) ([]SInstance, error) {
return r.getInstances(zoneRegion, "")
}
func (r *SRegion) getInstances(zoneRegion string, serverId string) ([]SInstance, error) {
query := map[string]string{
"serverTypes": "VM",
"productTypes": "NORMAL,AUTOSCALING,VO,CDN,PAAS_MASTER,PAAS_SLAVE,VCPE,EMR,LOGAUDIT",
//"productTypes": "NORMAL",
"visible": "true",
}
if len(serverId) > 0 {
query["serverId"] = serverId
}
if len(zoneRegion) > 0 {
query["region"] = zoneRegion
}
request := NewNovaRequest(NewApiRequest(r.ID, "/api/v2/server/web/with/network", query, nil))
var instances []SInstance
err := r.client.doList(context.Background(), request, &instances)
if err != nil {
return nil, err
}
return instances, nil
}
func (r *SRegion) GetInstanceById(id string) (*SInstance, error) {
instances, err := r.getInstances("", id)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, cloudprovider.ErrNotFound
}
instance := &instances[0]
host, err := r.findHost(instance.Region)
if err == nil {
instance.host = host
}
return instance, nil
}
func (r *SRegion) GetInstanceVNCUrl(instanceId string) (string, error) {
request := NewNovaRequest(NewApiRequest(r.ID, fmt.Sprintf("/api/server/%s/vnc", instanceId), nil, nil))
var url string
err := r.client.doGet(context.Background(), request, &url)
if err != nil {
return "", err
}
return url, nil
}
| Renew | identifier_name |
instance.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecloud
import (
"context"
"fmt"
"time"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/util/sets"
billing_api "yunion.io/x/onecloud/pkg/apis/billing"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
"yunion.io/x/onecloud/pkg/util/billing"
)
type SNovaRequest struct {
SApiRequest
}
func NewNovaRequest(ar *SApiRequest) *SNovaRequest {
return &SNovaRequest{
SApiRequest: *ar,
}
}
func (nr *SNovaRequest) GetPort() string {
if nr.RegionId == "guangzhou-2" {
return ""
}
return nr.SApiRequest.GetPort()
}
type SInstance struct {
multicloud.SInstanceBase
multicloud.EcloudTags
multicloud.SBillingBase
SZoneRegionBase
SCreateTime
nicComplete bool
host *SHost
image *SImage
sysDisk cloudprovider.ICloudDisk
dataDisks []cloudprovider.ICloudDisk
Id string
Name string
Vcpu int
Vmemory int
KeyName string
ImageRef string
ImageName string
ImageOsType string
FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
func (i *SInstance) GetStatus() string {
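// map the ecloud-side EcStatus value onto the corresponding onecloud VM status constant;
// anything unrecognized is reported as VM_UNKNOWN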
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error { | }
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) Renew(bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error {
return nil
}
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request := NewNovaRequest(NewApiRequest(in.host.zone.region.ID, "/api/v2/volume/volume/mount/list",
map[string]string{"serverId": in.Id}, nil))
disks := make([]SDisk, 0, 5)
err := in.host.zone.region.client.doList(context.Background(), request, &disks)
if err != nil {
return err
}
idisks := make([]cloudprovider.ICloudDisk, len(disks))
for i := range idisks {
storageType := disks[i].Type
storage, err := in.host.zone.getStorageByType(storageType)
if err != nil {
return errors.Wrapf(err, "unable to fetch storage with stoageType %s", storageType)
}
disks[i].storage = storage
idisks[i] = &disks[i]
}
in.dataDisks = idisks
return nil
}
func (in *SInstance) makeNicComplete() error {
routerIds := sets.NewString()
nics := make(map[string]*SInstanceNic, len(in.PortDetail))
for i := range in.PortDetail {
nic := &in.PortDetail[i]
routerIds.Insert(nic.RouterId)
nics[nic.PortId] = nic
}
for _, routerId := range routerIds.UnsortedList() {
request := NewConsoleRequest(in.host.zone.region.ID, fmt.Sprintf("/api/vpc/%s/nic", routerId),
map[string]string{
"resourceId": in.Id,
}, nil,
)
completeNics := make([]SInstanceNic, 0, len(nics)/2)
err := in.host.zone.region.client.doList(context.Background(), request, &completeNics)
if err != nil {
return errors.Wrapf(err, "unable to get nics with instance %s in vpc %s", in.Id, routerId)
}
for i := range completeNics {
id := completeNics[i].Id
nic, ok := nics[id]
if !ok {
continue
}
nic.SInstanceNicDetail = completeNics[i].SInstanceNicDetail
}
}
return nil
}
func (r *SRegion) findHost(zoneRegion string) (*SHost, error) {
zone, err := r.FindZone(zoneRegion)
if err != nil {
return nil, err
}
return &SHost{
zone: zone,
}, nil
}
func (r *SRegion) GetInstancesWithHost(zoneRegion string) ([]SInstance, error) {
instances, err := r.GetInstances(zoneRegion)
if err != nil {
return nil, err
}
for i := range instances {
host, _ := r.findHost(instances[i].Region)
instances[i].host = host
}
return instances, nil
}
func (r *SRegion) GetInstances(zoneRegion string) ([]SInstance, error) {
return r.getInstances(zoneRegion, "")
}
func (r *SRegion) getInstances(zoneRegion string, serverId string) ([]SInstance, error) {
query := map[string]string{
"serverTypes": "VM",
"productTypes": "NORMAL,AUTOSCALING,VO,CDN,PAAS_MASTER,PAAS_SLAVE,VCPE,EMR,LOGAUDIT",
//"productTypes": "NORMAL",
"visible": "true",
}
if len(serverId) > 0 {
query["serverId"] = serverId
}
if len(zoneRegion) > 0 {
query["region"] = zoneRegion
}
request := NewNovaRequest(NewApiRequest(r.ID, "/api/v2/server/web/with/network", query, nil))
var instances []SInstance
err := r.client.doList(context.Background(), request, &instances)
if err != nil {
return nil, err
}
return instances, nil
}
func (r *SRegion) GetInstanceById(id string) (*SInstance, error) {
instances, err := r.getInstances("", id)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, cloudprovider.ErrNotFound
}
instance := &instances[0]
host, err := r.findHost(instance.Region)
if err == nil {
instance.host = host
}
return instance, nil
}
func (r *SRegion) GetInstanceVNCUrl(instanceId string) (string, error) {
request := NewNovaRequest(NewApiRequest(r.ID, fmt.Sprintf("/api/server/%s/vnc", instanceId), nil, nil))
var url string
err := r.client.doGet(context.Background(), request, &url)
if err != nil {
return "", err
}
return url, nil
} | return cloudprovider.ErrNotImplemented | random_line_split |
instance.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecloud
import (
"context"
"fmt"
"time"
"yunion.io/x/jsonutils"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/util/sets"
billing_api "yunion.io/x/onecloud/pkg/apis/billing"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
"yunion.io/x/onecloud/pkg/util/billing"
)
type SNovaRequest struct {
SApiRequest
}
func NewNovaRequest(ar *SApiRequest) *SNovaRequest {
return &SNovaRequest{
SApiRequest: *ar,
}
}
func (nr *SNovaRequest) GetPort() string {
if nr.RegionId == "guangzhou-2" {
return ""
}
return nr.SApiRequest.GetPort()
}
type SInstance struct {
multicloud.SInstanceBase
multicloud.EcloudTags
multicloud.SBillingBase
SZoneRegionBase
SCreateTime
nicComplete bool
host *SHost
image *SImage
sysDisk cloudprovider.ICloudDisk
dataDisks []cloudprovider.ICloudDisk
Id string
Name string
Vcpu int
Vmemory int
KeyName string
ImageRef string
ImageName string
ImageOsType string
FlavorRef string
SystemDiskSizeGB int `json:"vdisk"`
SystemDiskId string
ServerType string
ServerVmType string
EcStatus string
BootVolumeType string
Deleted int
Visible bool
Region string
PortDetail []SInstanceNic
}
func (i *SInstance) GetBillingType() string {
return billing_api.BILLING_TYPE_POSTPAID
}
func (i *SInstance) GetExpiredAt() time.Time {
return time.Time{}
}
func (i *SInstance) GetId() string {
return i.Id
}
func (i *SInstance) GetName() string {
return i.Name
}
func (i *SInstance) GetGlobalId() string {
return i.GetId()
}
func (i *SInstance) GetStatus() string {
switch i.EcStatus {
case "active":
return api.VM_RUNNING
case "suspended", "paused":
return api.VM_SUSPEND
case "build", "rebuild", "resize", "verify_resize", "revert_resize", "password":
return api.VM_STARTING
case "reboot", "hard_reboot":
return api.VM_STOPPING
case "stopped", "shutoff":
return api.VM_READY
case "migrating":
return api.VM_MIGRATING
case "backuping":
return api.VM_BACKUP_CREATING
default:
return api.VM_UNKNOWN
}
}
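// Illustrative sketch (not in the original source): a test-style check of the
// status mapping above; the literal statuses are taken from the switch cases,
// everything else is hypothetical.
//
//	i := &SInstance{EcStatus: "shutoff"}
//	_ = i.GetStatus() == api.VM_READY     // true
//	i.EcStatus = "migrating"
//	_ = i.GetStatus() == api.VM_MIGRATING // true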
func (i *SInstance) Refresh() error {
// TODO
return nil
}
func (i *SInstance) IsEmulated() bool {
return false
}
func (self *SInstance) GetBootOrder() string {
return "dcn"
}
func (self *SInstance) GetVga() string {
return "std"
}
func (self *SInstance) GetVdi() string {
return "vnc"
}
func (i *SInstance) GetImage() (*SImage, error) {
if i.image != nil {
return i.image, nil
}
image, err := i.host.zone.region.GetImage(i.ImageRef)
if err != nil {
return nil, err
}
i.image = image
return i.image, nil
}
func (i *SInstance) GetOSType() string {
return i.ImageOsType
}
func (i *SInstance) GetOSName() string {
image, err := i.GetImage()
if err != nil {
return ""
}
return image.OsName
}
func (i *SInstance) GetBios() string {
return "BIOS"
}
func (i *SInstance) GetMachine() string {
return "pc"
}
func (i *SInstance) GetInstanceType() string {
return i.FlavorRef
}
func (self *SInstance) GetSysTags() map[string]string {
data := map[string]string{}
// TODO
lowerOs := self.GetOSType()
priceKey := fmt.Sprintf("%s::%s::%s", self.host.zone.region.GetId(), self.GetInstanceType(), lowerOs)
data["price_key"] = priceKey
data["zone_ext_id"] = self.host.zone.GetGlobalId()
image, _ := self.GetImage()
if image != nil {
meta := image.GetSysTags()
for k, v := range meta {
data[k] = v
}
}
return data
}
func (in *SInstance) GetProjectId() string {
return ""
}
func (in *SInstance) GetIHost() cloudprovider.ICloudHost {
return in.host
}
func (in *SInstance) GetIDisks() ([]cloudprovider.ICloudDisk, error) {
if in.sysDisk == nil {
in.fetchSysDisk()
}
if in.dataDisks == nil {
err := in.fetchDataDisks()
if err != nil {
return nil, err
}
}
return append([]cloudprovider.ICloudDisk{in.sysDisk}, in.dataDisks...), nil
}
func (in *SInstance) GetINics() ([]cloudprovider.ICloudNic, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
inics := make([]cloudprovider.ICloudNic, len(in.PortDetail))
for i := range in.PortDetail {
in.PortDetail[i].instance = in
inics[i] = &in.PortDetail[i]
}
return inics, nil
}
func (in *SInstance) GetIEIP() (cloudprovider.ICloudEIP, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
var eipId string
for i := range in.PortDetail {
if len(in.PortDetail[i].IpId) > 0 {
eipId = in.PortDetail[i].IpId
break
}
}
if len(eipId) == 0 {
return nil, nil
}
return in.host.zone.region.GetEipById(eipId)
}
func (in *SInstance) GetSecurityGroupIds() ([]string, error) {
if !in.nicComplete {
err := in.makeNicComplete()
if err != nil {
return nil, errors.Wrap(err, "unable to make nics complete")
}
in.nicComplete = true
}
ret := sets.NewString()
for i := range in.PortDetail {
for _, group := range in.PortDetail[i].SecurityGroups {
ret.Insert(group.Id)
}
}
return ret.UnsortedList(), nil
}
func (in *SInstance) GetVcpuCount() int {
return in.Vcpu
}
func (in *SInstance) GetVmemSizeMB() int {
return in.Vmemory
}
func (in *SInstance) AssignSecurityGroup(id string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) SetSecurityGroups(ids []string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) GetHypervisor() string {
return api.HYPERVISOR_ECLOUD
}
func (in *SInstance) StartVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) StopVM(ctx context.Context, opts *cloudprovider.ServerStopOptions) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeleteVM(ctx context.Context) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) UpdateVM(ctx context.Context, name string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) UpdateUserData(userData string) error {
return cloudprovider.ErrNotSupported
}
func (self *SInstance) RebuildRoot(ctx context.Context, config *cloudprovider.SManagedVMRebuildRootConfig) (string, error) {
return "", cloudprovider.ErrNotImplemented
}
func (self *SInstance) DeployVM(ctx context.Context, name string, username string, password string, publicKey string, deleteKeypair bool, description string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) ChangeConfig(ctx context.Context, config *cloudprovider.SManagedVMChangeConfig) error {
return errors.ErrNotImplemented
}
func (in *SInstance) GetVNCInfo() (jsonutils.JSONObject, error) {
url, err := in.host.zone.region.GetInstanceVNCUrl(in.GetId())
if err != nil {
return nil, err
}
ret := jsonutils.NewDict()
ret.Add(jsonutils.NewString(url), "url")
ret.Add(jsonutils.NewString("ecloud"), "protocol")
ret.Add(jsonutils.NewString(in.GetId()), "instance_id")
return ret, nil
}
func (in *SInstance) AttachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (in *SInstance) DetachDisk(ctx context.Context, diskId string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) CreateDisk(ctx context.Context, sizeMb int, uuid string, driver string) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) Renew(bc billing.SBillingCycle) error {
return cloudprovider.ErrNotImplemented
}
func (self *SInstance) GetError() error {
return nil
}
func (in *SInstance) fetchSysDisk() {
storage, _ := in.host.zone.getStorageByType(api.STORAGE_ECLOUD_SYSTEM)
disk := SDisk{
storage: storage,
ManualAttr: SDiskManualAttr{
IsVirtual: true,
TempalteId: in.ImageRef,
ServerId: in.Id,
},
SCreateTime: in.SCreateTime,
SZoneRegionBase: in.SZoneRegionBase,
ServerId: []string{in.Id},
IsShare: false,
IsDelete: false,
SizeGB: in.SystemDiskSizeGB,
ID: in.SystemDiskId,
Name: fmt.Sprintf("%s-root", in.Name),
Status: "in-use",
Type: api.STORAGE_ECLOUD_SYSTEM,
}
in.sysDisk = &disk
return
}
func (in *SInstance) fetchDataDisks() error {
request := NewNovaRequest(NewApiRequest(in.host.zone.region.ID, "/api/v2/volume/volume/mount/list",
map[string]string{"serverId": in.Id}, nil))
disks := make([]SDisk, 0, 5)
err := in.host.zone.region.client.doList(context.Background(), request, &disks)
if err != nil {
return err
}
idisks := make([]cloudprovider.ICloudDisk, len(disks))
for i := range idisks {
storageType := disks[i].Type
storage, err := in.host.zone.getStorageByType(storageType)
if err != nil {
return errors.Wrapf(err, "unable to fetch storage with stoageType %s", storageType)
}
disks[i].storage = storage
idisks[i] = &disks[i]
}
in.dataDisks = idisks
return nil
}
func (in *SInstance) makeNicComplete() error {
routerIds := sets.NewString()
nics := make(map[string]*SInstanceNic, len(in.PortDetail))
for i := range in.PortDetail |
for _, routerId := range routerIds.UnsortedList() {
request := NewConsoleRequest(in.host.zone.region.ID, fmt.Sprintf("/api/vpc/%s/nic", routerId),
map[string]string{
"resourceId": in.Id,
}, nil,
)
completeNics := make([]SInstanceNic, 0, len(nics)/2)
err := in.host.zone.region.client.doList(context.Background(), request, &completeNics)
if err != nil {
return errors.Wrapf(err, "unable to get nics with instance %s in vpc %s", in.Id, routerId)
}
for i := range completeNics {
id := completeNics[i].Id
nic, ok := nics[id]
if !ok {
continue
}
nic.SInstanceNicDetail = completeNics[i].SInstanceNicDetail
}
}
return nil
}
func (r *SRegion) findHost(zoneRegion string) (*SHost, error) {
zone, err := r.FindZone(zoneRegion)
if err != nil {
return nil, err
}
return &SHost{
zone: zone,
}, nil
}
func (r *SRegion) GetInstancesWithHost(zoneRegion string) ([]SInstance, error) {
instances, err := r.GetInstances(zoneRegion)
if err != nil {
return nil, err
}
for i := range instances {
host, _ := r.findHost(instances[i].Region)
instances[i].host = host
}
return instances, nil
}
func (r *SRegion) GetInstances(zoneRegion string) ([]SInstance, error) {
return r.getInstances(zoneRegion, "")
}
func (r *SRegion) getInstances(zoneRegion string, serverId string) ([]SInstance, error) {
query := map[string]string{
"serverTypes": "VM",
"productTypes": "NORMAL,AUTOSCALING,VO,CDN,PAAS_MASTER,PAAS_SLAVE,VCPE,EMR,LOGAUDIT",
//"productTypes": "NORMAL",
"visible": "true",
}
if len(serverId) > 0 {
query["serverId"] = serverId
}
if len(zoneRegion) > 0 {
query["region"] = zoneRegion
}
request := NewNovaRequest(NewApiRequest(r.ID, "/api/v2/server/web/with/network", query, nil))
var instances []SInstance
err := r.client.doList(context.Background(), request, &instances)
if err != nil {
return nil, err
}
return instances, nil
}
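// exampleListInstances is an illustrative sketch (not part of the original
// source) showing how a caller might combine the listing helpers above; the
// zone region name "guangzhou-2" is a hypothetical example value.
func exampleListInstances(region *SRegion) error {
	instances, err := region.GetInstancesWithHost("guangzhou-2")
	if err != nil {
		return err
	}
	for i := range instances {
		fmt.Printf("%s %s\n", instances[i].GetName(), instances[i].GetStatus())
	}
	return nil
}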
func (r *SRegion) GetInstanceById(id string) (*SInstance, error) {
instances, err := r.getInstances("", id)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, cloudprovider.ErrNotFound
}
instance := &instances[0]
host, err := r.findHost(instance.Region)
if err == nil {
instance.host = host
}
return instance, nil
}
func (r *SRegion) GetInstanceVNCUrl(instanceId string) (string, error) {
request := NewNovaRequest(NewApiRequest(r.ID, fmt.Sprintf("/api/server/%s/vnc", instanceId), nil, nil))
var url string
err := r.client.doGet(context.Background(), request, &url)
if err != nil {
return "", err
}
return url, nil
}
| {
nic := &in.PortDetail[i]
routerIds.Insert(nic.RouterId)
nics[nic.PortId] = nic
} | conditional_block |
lifecycle.go | package consumer
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.gazette.dev/core/broker/client"
pb "go.gazette.dev/core/broker/protocol"
pc "go.gazette.dev/core/consumer/protocol"
"go.gazette.dev/core/consumer/recoverylog"
"go.gazette.dev/core/labels"
"go.gazette.dev/core/message"
"go.gazette.dev/core/metrics"
"google.golang.org/grpc/codes"
)
// playLog fetches current shard hints and plays them back into a temporary directory using the Player.
func playLog(shard Shard, pl *recoverylog.Player, etcd *clientv3.Client) error {
if dir, err := ioutil.TempDir("", shard.Spec().Id.String()+"-"); err != nil {
return extendErr(err, "creating shard working directory")
} else if h, err := fetchHints(shard.Context(), shard.Spec(), etcd); err != nil {
return extendErr(err, "fetching FSM hints")
} else if logSpec, err := fetchJournalSpec(shard.Context(), pickFirstHints(h).Log, shard.JournalClient()); err != nil {
return extendErr(err, "fetching JournalSpec")
} else if ct := logSpec.LabelSet.ValueOf(labels.ContentType); ct != labels.ContentType_RecoveryLog {
return errors.Errorf("expected label %s value %s (got %v)", labels.ContentType, labels.ContentType_RecoveryLog, ct)
} else if err = pl.Play(shard.Context(), pickFirstHints(h), dir, shard.JournalClient()); err != nil {
return extendErr(err, "playing log %s", pickFirstHints(h).Log)
}
return nil
}
// completePlayback injects a new AuthorID into the log to complete playback,
// stores recovered hints, initializes an Application Store, and returns
// offsets at which journal consumption should continue.
func completePlayback(shard Shard, app Application, pl *recoverylog.Player,
etcd *clientv3.Client) (Store, map[pb.Journal]int64, error) {
var author, err = recoverylog.NewRandomAuthorID()
if err != nil {
return nil, nil, extendErr(err, "generating Author")
}
// Ask |pl| to inject a hand-off to our generated |author|, so that other
// tailing readers will apply our write operations over those of a previous
// recorder which may still be shutting down.
pl.InjectHandoff(author)
select {
case <-pl.Done():
// Pass.
case <-shard.Context().Done():
return nil, nil, shard.Context().Err()
}
if pl.FSM == nil {
return nil, nil, errors.Errorf("completePlayback aborting due to Play failure")
}
// We've completed log playback, and we're likely the most recent shard
// primary to do so. Snapshot our recovered hints. We'll sanity-check that
// we can open the recovered store & load offsets, and only then persist
// these recovered hints.
var recoveredHints = pl.FSM.BuildHints()
// Initialize the store.
var recorder = recoverylog.NewRecorder(pl.FSM, author, pl.Dir, shard.JournalClient())
var store Store
var offsets map[pb.Journal]int64
if store, err = app.NewStore(shard, pl.Dir, recorder); err != nil {
return nil, nil, extendErr(err, "initializing store")
} else if offsets, err = store.FetchJournalOffsets(); err != nil {
return nil, nil, extendErr(err, "fetching journal offsets from store")
} else if err = storeRecoveredHints(shard, recoveredHints, etcd); err != nil {
return nil, nil, extendErr(err, "storingRecoveredHints")
}
// Lower-bound each source to its ShardSpec.Source.MinOffset.
for _, src := range shard.Spec().Sources {
if offsets[src.Journal] < src.MinOffset {
offsets[src.Journal] = src.MinOffset
}
}
return store, offsets, nil
}
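// exampleShardStartup is an illustrative sketch (not part of the original
// source) of the order in which the recovery helpers above are driven when a
// shard becomes primary: play back the recovery log, then complete playback to
// obtain the Store and the journal offsets at which consumption resumes. The
// Player and etcd client are assumed to be supplied by the caller.
func exampleShardStartup(shard Shard, app Application, pl *recoverylog.Player, etcd *clientv3.Client) (Store, map[pb.Journal]int64, error) {
	if err := playLog(shard, pl, etcd); err != nil {
		return nil, nil, err
	}
	return completePlayback(shard, app, pl, etcd)
}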
// pumpMessages reads and decodes messages from a Journal & offset into the provided channel.
func pumpMessages(shard Shard, app Application, journal pb.Journal, offset int64, msgCh chan<- message.Envelope) error {
var spec, err = fetchJournalSpec(shard.Context(), journal, shard.JournalClient())
if err != nil {
return extendErr(err, "fetching JournalSpec")
}
framing, err := message.FramingByContentType(spec.LabelSet.ValueOf(labels.ContentType))
if err != nil {
return extendErr(err, "determining framing (%s)", journal)
}
var rr = client.NewRetryReader(shard.Context(), shard.JournalClient(), pb.ReadRequest{
Journal: journal,
Offset: offset,
Block: true,
DoNotProxy: !shard.JournalClient().IsNoopRouter(),
})
var br = bufio.NewReader(rr)
for next := offset; ; offset = next {
var frame []byte
var msg message.Message
if frame, err = framing.Unpack(br); err != nil {
// Swallow ErrNoProgress from our bufio.Reader. client.Reader returns
// an empty read to allow for inspection of the ReadResponse message,
			// and client.RetryReader also surfaces these empty reads. A journal
// with no active appends can eventually cause our bufio.Reader to
// give up, though no error has occurred.
if errors.Cause(err) == io.ErrNoProgress {
continue
}
// ErrOffsetJump indicates the next byte of available content is at an
// offset larger than the one requested. This can happen if a range of
// content was deleted from the journal. Log a warning, but continue
// processing at the jumped-to offset.
if errors.Cause(err) == client.ErrOffsetJump {
log.WithFields(log.Fields{"journal": journal, "from": offset, "to": rr.Offset()}).
Warn("source journal offset jump")
next = rr.Offset()
continue
}
return extendErr(err, "unpacking frame (%s:%d)", spec.Name, offset)
}
next = rr.AdjustedOffset(br)
if msg, err = app.NewMessage(spec); err != nil {
return extendErr(err, "NewMessage (%s)", journal)
} else if err = framing.Unmarshal(frame, msg); err != nil {
log.WithFields(log.Fields{"journal": journal, "offset": offset, "err": err}).
Error("failed to unmarshal message")
continue
}
select {
case msgCh <- message.Envelope{
JournalSpec: spec,
Fragment: rr.Reader.Response.Fragment,
NextOffset: next,
Message: msg,
}: // Pass.
case <-shard.Context().Done():
return extendErr(shard.Context().Err(), "sending msg (%s:%d)", spec.Name, offset)
}
metrics.GazetteConsumerBytesConsumedTotal.Add(float64(next - offset))
}
}
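// examplePumpAllSources is an illustrative sketch (not part of the original
// source): it starts one pumpMessages goroutine per shard source, all feeding
// a shared channel for consumeMessages. The buffer size and the error handling
// here are assumptions, not values taken from this file.
func examplePumpAllSources(shard Shard, app Application, offsets map[pb.Journal]int64) <-chan message.Envelope {
	var msgCh = make(chan message.Envelope, 128)
	for _, src := range shard.Spec().Sources {
		go func(journal pb.Journal, offset int64) {
			if err := pumpMessages(shard, app, journal, offset, msgCh); err != nil {
				log.WithField("err", err).Error("pumpMessages exited")
			}
		}(src.Journal, offsets[src.Journal])
	}
	return msgCh
}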
// consumeMessages runs consumer transactions, consuming from the provided
// |msgCh| and, when notified by |hintsCh|, occasionally stores recorded FSMHints.
func consumeMessages(shard Shard, store Store, app Application, etcd *clientv3.Client,
msgCh <-chan message.Envelope, hintsCh <-chan time.Time) (err error) {
// Supply an idle timer for txnStep's use in timing transaction durations.
var realTimer = time.NewTimer(0)
if !realTimer.Stop() {
<-realTimer.C
}
var timer = txnTimer{
C: realTimer.C,
Reset: realTimer.Reset,
Stop: realTimer.Stop,
}
var txn, prior transaction
for {
select {
case <-hintsCh:
var hints recoverylog.FSMHints
if hints, err = store.Recorder().BuildHints(); err == nil {
err = storeRecordedHints(shard, hints, etcd)
}
if err != nil {
err = extendErr(err, "storeRecordedHints")
return
}
default:
// Pass.
}
var spec = shard.Spec()
txn.minDur, txn.maxDur = spec.MinTxnDuration, spec.MaxTxnDuration
txn.msgCh = msgCh
txn.offsets = make(map[pb.Journal]int64)
// Run the transaction until completion or error.
for done := false; !done && err == nil; done, err = txnStep(&txn, &prior, shard, store, app, timer) {
}
if err != nil {
err = extendErr(err, "txnStep")
}
if ba, ok := app.(BeginFinisher); ok && txn.msgCount != 0 {
if finishErr := ba.FinishTxn(shard, store, err); err == nil && finishErr != nil {
err = extendErr(finishErr, "FinishTxn")
}
}
if err != nil {
return
}
recordMetrics(&prior)
prior, txn = txn, transaction{doneCh: txn.barrier.Done()}
}
}
// fetchJournalSpec retrieves the current JournalSpec.
func fetchJournalSpec(ctx context.Context, name pb.Journal, journals pb.JournalClient) (spec *pb.JournalSpec, err error) {
var lr *pb.ListResponse
lr, err = client.ListAllJournals(ctx, journals, pb.ListRequest{
Selector: pb.LabelSelector{
Include: pb.LabelSet{Labels: []pb.Label{{Name: "name", Value: name.String()}}},
},
})
if err == nil && len(lr.Journals) == 0 {
err = errors.Errorf("named journal does not exist (%s)", name)
}
if err == nil {
spec = &lr.Journals[0].Spec
}
return
}
type fetchedHints struct {
spec *pc.ShardSpec
txnResp *clientv3.TxnResponse
hints []*recoverylog.FSMHints
}
| func pickFirstHints(f fetchedHints) recoverylog.FSMHints {
for _, currHints := range f.hints {
if currHints == nil {
continue
}
return *currHints
}
return recoverylog.FSMHints{Log: f.spec.RecoveryLog()}
}
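// Illustrative note (not part of the original source): pickFirstHints walks
// f.hints in order (the primary entry first, then each backup), skipping nil
// slots, and only synthesizes empty hints for the spec's recovery log when
// every slot is absent. For example, with f.hints = {nil, h1, h2} it returns
// *h1.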
// fetchHints retrieves and decodes all FSMHints for the ShardSpec.
// Nil values will be returned where hint values have not been written. It also
// returns a TxnResponse holding each of the hints values, which can be used for
// transactional updates of hints.
func fetchHints(ctx context.Context, spec *pc.ShardSpec, etcd *clientv3.Client) (out fetchedHints, err error) {
var ops = []clientv3.Op{clientv3.OpGet(spec.HintPrimaryKey())}
for _, hk := range spec.HintBackupKeys() {
ops = append(ops, clientv3.OpGet(hk))
}
out.spec = spec
if out.txnResp, err = etcd.Txn(ctx).If().Then(ops...).Commit(); err != nil {
err = extendErr(err, "fetching ShardSpec.HintKeys")
return
}
for i := range out.txnResp.Responses {
var currHints recoverylog.FSMHints
if kvs := out.txnResp.Responses[i].GetResponseRange().Kvs; len(kvs) == 0 {
out.hints = append(out.hints, nil)
continue
} else if err = json.Unmarshal(kvs[0].Value, &currHints); err != nil {
err = extendErr(err, "unmarshal FSMHints")
} else if _, err = recoverylog.NewFSM(currHints); err != nil { // Validate hints.
err = extendErr(err, "validating FSMHints")
} else if currHints.Log != spec.RecoveryLog() {
err = errors.Errorf("recovered hints recovery log doesn't match ShardSpec.RecoveryLog (%s vs %s)",
currHints.Log, spec.RecoveryLog())
}
if err != nil {
return
}
out.hints = append(out.hints, &currHints)
}
return
}
// storeRecordedHints writes FSMHints into the primary hint key of the spec.
func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {
var val, err = json.Marshal(hints)
if err != nil {
return extendErr(err, "marshal FSMHints")
}
var asn = shard.Assignment()
_, err = etcd.Txn(shard.Context()).
// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.
// Compare CreateRevision to allow for a raced ReplicaState update.
If(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), "=", asn.Raw.CreateRevision)).
Then(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).
Commit()
if etcdErr, ok := err.(rpctypes.EtcdError); ok && etcdErr.Code() == codes.Unavailable {
// Recorded hints are advisory and can generally tolerate omitted
// updates. It's also annoying for temporary Etcd partitions to abort
// an otherwise-fine shard primary. So, log but allow shard processing
// to continue; we'll retry on the next hints flush interval.
log.WithFields(log.Fields{
"key": shard.Spec().HintPrimaryKey(),
"err": err,
}).Warn("failed to store recorded FSMHints (will retry)")
} else if err != nil {
return extendErr(err, "storing recorded FSMHints")
}
return nil
}
// storeRecoveredHints writes the FSMHints into the first backup hint key of the spec,
// rotating hints previously stored under that key to the second backup hint key,
// and so on as a single transaction.
func storeRecoveredHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) (err error) {
var (
spec = shard.Spec()
asn = shard.Assignment()
backups = shard.Spec().HintBackupKeys()
h fetchedHints
)
if h, err = fetchHints(shard.Context(), spec, etcd); err != nil {
return
}
	// |hints| is serialized and written to backups[0]. In the same txn,
	// rotate the current value at backups[0] => backups[1], and so on.
var val []byte
if val, err = json.Marshal(hints); err != nil {
return
}
var cmp []clientv3.Cmp
var ops []clientv3.Op
	// The txn responses returned from fetchHints are structured such that the first response will
	// be the primary response and the subsequent responses are the backup responses; this slice
	// represents just the backup responses.
var backupResponses = h.txnResp.Responses[1:]
// Verify our Assignment is still in effect (eg, we're still primary).
cmp = append(cmp, clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)),
"=", asn.Raw.CreateRevision))
for i := 0; i != len(backups) && val != nil; i++ {
ops = append(ops, clientv3.OpPut(backups[i], string(val)))
if kvs := backupResponses[i].GetResponseRange().Kvs; len(kvs) == 0 {
// Verify there is still no current key/value at this hints key slot.
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", 0))
val = nil
} else {
// Verify the key/value at this hints key slot is unchanged.
// Retain its value to rotate into the next slot (if one exists).
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", kvs[0].ModRevision))
val = kvs[0].Value
}
}
if _, err = etcd.Txn(shard.Context()).If(cmp...).Then(ops...).Commit(); err != nil {
err = extendErr(err, "storing recovered FSMHints")
}
return
}
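// Illustrative example (not part of the original source) of the rotation
// performed above, assuming two backup keys B0 and B1 holding hypothetical
// values:
//
//	before txn: B0 = hintsA, B1 = hintsB
//	after txn:  B0 = hints (just recovered), B1 = hintsA
//
// hintsB is dropped because there is no further backup slot, and every read
// value is guarded by a ModRevision comparison so a concurrent update aborts
// the whole transaction.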
// transaction models state and metrics used in the execution of a consumer transaction.
type transaction struct {
barrier *client.AsyncAppend // Write barrier of the txn at commit.
minDur, maxDur time.Duration // Minimum and maximum durations. Marked as -1 when elapsed.
msgCh <-chan message.Envelope // Message source. Nil'd upon reaching |maxDur|.
msgCount int // Number of messages batched into this transaction.
offsets map[pb.Journal]int64 // End (exclusive) journal offsets of the transaction.
doneCh <-chan struct{} // DoneCh of prior transaction barrier.
beganAt time.Time // Time at which transaction began.
stalledAt time.Time // Time at which processing stalled while waiting on IO.
flushedAt time.Time // Time at which flush began.
committedAt time.Time // Time at which commit began.
syncedAt time.Time // Time at which txn |barrier| resolved.
}
// txnTimer is a time.Timer which can be mocked within unit tests.
type txnTimer struct {
C <-chan time.Time
Reset func(time.Duration) bool
Stop func() bool
}
// txnStep progresses a consumer transaction by a single step. If the transaction
// is complete, it returns done=true. Otherwise, txnStep should be called again
// to continue making progress on the transaction.
func txnStep(txn, prior *transaction, shard Shard, store Store, app Application, timer txnTimer) (done bool, err error) {
// If the minimum batching duration hasn't elapsed *or* the prior transaction
// barrier hasn't completed, continue performing blocking reads of messages.
if txn.msgCount == 0 || txn.minDur != -1 || txn.doneCh != nil {
select {
case msg := <-txn.msgCh:
if txn.msgCount == 0 {
if ba, ok := app.(BeginFinisher); ok {
// BeginTxn may block arbitrarily.
if err = ba.BeginTxn(shard, store); err != nil {
err = extendErr(err, "app.BeginTxn")
return
}
}
txn.beganAt = timeNow()
timer.Reset(txn.minDur)
}
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.minDur)) {
panic("unexpected tick")
}
txn.minDur = -1 // Mark as completed.
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
timer.Reset(txn.beganAt.Add(txn.maxDur).Sub(tick))
} else {
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
txn.stalledAt = timeNow() // We're stalled waiting for prior txn IO.
}
return
		case <-txn.doneCh:
prior.syncedAt = timeNow()
txn.doneCh = nil
return
		case <-shard.Context().Done():
err = shard.Context().Err()
return
}
panic("not reached")
}
// Continue reading messages so long as we do not block or reach |maxDur|.
select {
case msg := <-txn.msgCh:
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
panic("unexpected tick")
}
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
return
	case <-shard.Context().Done():
err = shard.Context().Err()
return
default:
// |msgCh| stalled. Fallthrough to complete the transaction.
}
if txn.flushedAt = timeNow(); txn.stalledAt.IsZero() {
txn.stalledAt = txn.flushedAt // We spent no time stalled.
}
if err = app.FinalizeTxn(shard, store); err != nil {
err = extendErr(err, "app.FinalizeTxn")
return
}
// Inject a strong write barrier which resolves only after pending writes
// to all journals have completed. We do this before store.Flush to ensure
// that writes driven by transaction messages have completed before we
// persist updated offsets which step past those messages.
store.Recorder().StrongBarrier()
if err = store.Flush(txn.offsets); err != nil {
err = extendErr(err, "store.Flush")
return
}
txn.barrier = store.Recorder().WeakBarrier()
txn.committedAt = timeNow()
// If the timer is still running, stop and drain it.
if txn.maxDur != -1 && !timer.Stop() {
<-timer.C
}
done = true
return
}
// recordMetrics of a fully completed transaction.
func recordMetrics(txn *transaction) {
metrics.GazetteConsumerTxCountTotal.Inc()
metrics.GazetteConsumerTxMessagesTotal.Add(float64(txn.msgCount))
metrics.GazetteConsumerTxSecondsTotal.Add(txn.syncedAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxConsumeSecondsTotal.Add(txn.stalledAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxStalledSecondsTotal.Add(txn.flushedAt.Sub(txn.stalledAt).Seconds())
metrics.GazetteConsumerTxFlushSecondsTotal.Add(txn.committedAt.Sub(txn.flushedAt).Seconds())
metrics.GazetteConsumerTxSyncSecondsTotal.Add(txn.syncedAt.Sub(txn.committedAt).Seconds())
}
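// Illustrative note (not part of the original source): the timestamps captured
// in txnStep are ordered
//
//	beganAt <= stalledAt <= flushedAt <= committedAt <= syncedAt
//
// so each metric above is a non-negative phase duration: consume is
// stalledAt-beganAt, stalled is flushedAt-stalledAt, flush is
// committedAt-flushedAt, and sync is syncedAt-committedAt.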
func extendErr(err error, mFmt string, args ...interface{}) error {
if err == nil {
panic("expected error")
} else if err == context.Canceled || err == context.DeadlineExceeded {
return err
} else if _, ok := err.(interface{ StackTrace() errors.StackTrace }); ok {
// Avoid attaching another errors.StackTrace if one is already present.
return errors.WithMessage(err, fmt.Sprintf(mFmt, args...))
} else {
// Use Wrapf to simultaneously attach |mFmt| and the current stack trace.
return errors.Wrapf(err, mFmt, args...)
}
}
var timeNow = time.Now | // pickFirstHints retrieves the first hints from |f|. If there are no primary
// hints available the most recent backup hints will be returned. If there are
// no hints available an empty set of hints is returned. | random_line_split |
lifecycle.go | package consumer
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.gazette.dev/core/broker/client"
pb "go.gazette.dev/core/broker/protocol"
pc "go.gazette.dev/core/consumer/protocol"
"go.gazette.dev/core/consumer/recoverylog"
"go.gazette.dev/core/labels"
"go.gazette.dev/core/message"
"go.gazette.dev/core/metrics"
"google.golang.org/grpc/codes"
)
// playLog fetches current shard hints and plays them back into a temporary directory using the Player.
func playLog(shard Shard, pl *recoverylog.Player, etcd *clientv3.Client) error {
if dir, err := ioutil.TempDir("", shard.Spec().Id.String()+"-"); err != nil {
return extendErr(err, "creating shard working directory")
} else if h, err := fetchHints(shard.Context(), shard.Spec(), etcd); err != nil {
return extendErr(err, "fetching FSM hints")
} else if logSpec, err := fetchJournalSpec(shard.Context(), pickFirstHints(h).Log, shard.JournalClient()); err != nil {
return extendErr(err, "fetching JournalSpec")
} else if ct := logSpec.LabelSet.ValueOf(labels.ContentType); ct != labels.ContentType_RecoveryLog {
return errors.Errorf("expected label %s value %s (got %v)", labels.ContentType, labels.ContentType_RecoveryLog, ct)
} else if err = pl.Play(shard.Context(), pickFirstHints(h), dir, shard.JournalClient()); err != nil {
return extendErr(err, "playing log %s", pickFirstHints(h).Log)
}
return nil
}
// completePlayback injects a new AuthorID into the log to complete playback,
// stores recovered hints, initializes an Application Store, and returns
// offsets at which journal consumption should continue.
func completePlayback(shard Shard, app Application, pl *recoverylog.Player,
etcd *clientv3.Client) (Store, map[pb.Journal]int64, error) {
var author, err = recoverylog.NewRandomAuthorID()
if err != nil {
return nil, nil, extendErr(err, "generating Author")
}
// Ask |pl| to inject a hand-off to our generated |author|, so that other
// tailing readers will apply our write operations over those of a previous
// recorder which may still be shutting down.
pl.InjectHandoff(author)
select {
case <-pl.Done():
// Pass.
case <-shard.Context().Done():
return nil, nil, shard.Context().Err()
}
if pl.FSM == nil {
return nil, nil, errors.Errorf("completePlayback aborting due to Play failure")
}
// We've completed log playback, and we're likely the most recent shard
// primary to do so. Snapshot our recovered hints. We'll sanity-check that
// we can open the recovered store & load offsets, and only then persist
// these recovered hints.
var recoveredHints = pl.FSM.BuildHints()
// Initialize the store.
var recorder = recoverylog.NewRecorder(pl.FSM, author, pl.Dir, shard.JournalClient())
var store Store
var offsets map[pb.Journal]int64
if store, err = app.NewStore(shard, pl.Dir, recorder); err != nil {
return nil, nil, extendErr(err, "initializing store")
} else if offsets, err = store.FetchJournalOffsets(); err != nil {
return nil, nil, extendErr(err, "fetching journal offsets from store")
} else if err = storeRecoveredHints(shard, recoveredHints, etcd); err != nil {
return nil, nil, extendErr(err, "storingRecoveredHints")
}
// Lower-bound each source to its ShardSpec.Source.MinOffset.
for _, src := range shard.Spec().Sources {
if offsets[src.Journal] < src.MinOffset {
offsets[src.Journal] = src.MinOffset
}
}
return store, offsets, nil
}
// pumpMessages reads and decodes messages from a Journal & offset into the provided channel.
func pumpMessages(shard Shard, app Application, journal pb.Journal, offset int64, msgCh chan<- message.Envelope) error {
var spec, err = fetchJournalSpec(shard.Context(), journal, shard.JournalClient())
if err != nil {
return extendErr(err, "fetching JournalSpec")
}
framing, err := message.FramingByContentType(spec.LabelSet.ValueOf(labels.ContentType))
if err != nil {
return extendErr(err, "determining framing (%s)", journal)
}
var rr = client.NewRetryReader(shard.Context(), shard.JournalClient(), pb.ReadRequest{
Journal: journal,
Offset: offset,
Block: true,
DoNotProxy: !shard.JournalClient().IsNoopRouter(),
})
var br = bufio.NewReader(rr)
for next := offset; ; offset = next {
var frame []byte
var msg message.Message
if frame, err = framing.Unpack(br); err != nil {
// Swallow ErrNoProgress from our bufio.Reader. client.Reader returns
// an empty read to allow for inspection of the ReadResponse message,
			// and client.RetryReader also surfaces these empty reads. A journal
// with no active appends can eventually cause our bufio.Reader to
// give up, though no error has occurred.
if errors.Cause(err) == io.ErrNoProgress {
continue
}
// ErrOffsetJump indicates the next byte of available content is at an
// offset larger than the one requested. This can happen if a range of
// content was deleted from the journal. Log a warning, but continue
// processing at the jumped-to offset.
if errors.Cause(err) == client.ErrOffsetJump {
log.WithFields(log.Fields{"journal": journal, "from": offset, "to": rr.Offset()}).
Warn("source journal offset jump")
next = rr.Offset()
continue
}
return extendErr(err, "unpacking frame (%s:%d)", spec.Name, offset)
}
next = rr.AdjustedOffset(br)
if msg, err = app.NewMessage(spec); err != nil {
return extendErr(err, "NewMessage (%s)", journal)
} else if err = framing.Unmarshal(frame, msg); err != nil {
log.WithFields(log.Fields{"journal": journal, "offset": offset, "err": err}).
Error("failed to unmarshal message")
continue
}
select {
case msgCh <- message.Envelope{
JournalSpec: spec,
Fragment: rr.Reader.Response.Fragment,
NextOffset: next,
Message: msg,
}: // Pass.
case <-shard.Context().Done():
return extendErr(shard.Context().Err(), "sending msg (%s:%d)", spec.Name, offset)
}
metrics.GazetteConsumerBytesConsumedTotal.Add(float64(next - offset))
}
}
// consumeMessages runs consumer transactions, consuming from the provided
// |msgCh| and, when notified by |hintsCh|, occasionally stores recorded FSMHints.
func consumeMessages(shard Shard, store Store, app Application, etcd *clientv3.Client,
msgCh <-chan message.Envelope, hintsCh <-chan time.Time) (err error) {
// Supply an idle timer for txnStep's use in timing transaction durations.
var realTimer = time.NewTimer(0)
if !realTimer.Stop() {
<-realTimer.C
}
var timer = txnTimer{
C: realTimer.C,
Reset: realTimer.Reset,
Stop: realTimer.Stop,
}
var txn, prior transaction
for {
select {
case <-hintsCh:
var hints recoverylog.FSMHints
if hints, err = store.Recorder().BuildHints(); err == nil {
err = storeRecordedHints(shard, hints, etcd)
}
if err != nil {
err = extendErr(err, "storeRecordedHints")
return
}
default:
// Pass.
}
var spec = shard.Spec()
txn.minDur, txn.maxDur = spec.MinTxnDuration, spec.MaxTxnDuration
txn.msgCh = msgCh
txn.offsets = make(map[pb.Journal]int64)
// Run the transaction until completion or error.
for done := false; !done && err == nil; done, err = txnStep(&txn, &prior, shard, store, app, timer) {
}
if err != nil {
err = extendErr(err, "txnStep")
}
if ba, ok := app.(BeginFinisher); ok && txn.msgCount != 0 {
if finishErr := ba.FinishTxn(shard, store, err); err == nil && finishErr != nil {
err = extendErr(finishErr, "FinishTxn")
}
}
if err != nil {
return
}
recordMetrics(&prior)
prior, txn = txn, transaction{doneCh: txn.barrier.Done()}
}
}
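// exampleConsumeWithPeriodicHints is an illustrative sketch (not part of the
// original source): hintsCh is expected to tick periodically so recorded
// FSMHints get refreshed, and a plain time.Ticker is one way a caller could
// drive it. The five-minute interval is an assumption, not a value taken from
// this file.
func exampleConsumeWithPeriodicHints(shard Shard, store Store, app Application,
	etcd *clientv3.Client, msgCh <-chan message.Envelope) error {
	var ticker = time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	return consumeMessages(shard, store, app, etcd, msgCh, ticker.C)
}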
// fetchJournalSpec retrieves the current JournalSpec.
func fetchJournalSpec(ctx context.Context, name pb.Journal, journals pb.JournalClient) (spec *pb.JournalSpec, err error) {
var lr *pb.ListResponse
lr, err = client.ListAllJournals(ctx, journals, pb.ListRequest{
Selector: pb.LabelSelector{
Include: pb.LabelSet{Labels: []pb.Label{{Name: "name", Value: name.String()}}},
},
})
if err == nil && len(lr.Journals) == 0 {
err = errors.Errorf("named journal does not exist (%s)", name)
}
if err == nil {
spec = &lr.Journals[0].Spec
}
return
}
type fetchedHints struct {
spec *pc.ShardSpec
txnResp *clientv3.TxnResponse
hints []*recoverylog.FSMHints
}
// pickFirstHints retrieves the first hints from |f|. If there are no primary
// hints available the most recent backup hints will be returned. If there are
// no hints available an empty set of hints is returned.
func pickFirstHints(f fetchedHints) recoverylog.FSMHints {
for _, currHints := range f.hints {
if currHints == nil {
continue
}
return *currHints
}
return recoverylog.FSMHints{Log: f.spec.RecoveryLog()}
}
// fetchHints retrieves and decodes all FSMHints for the ShardSpec.
// Nil values will be returned where hint values have not been written. It also
// returns a TxnResponse holding each of the hints values, which can be used for
// transactional updates of hints.
func fetchHints(ctx context.Context, spec *pc.ShardSpec, etcd *clientv3.Client) (out fetchedHints, err error) {
var ops = []clientv3.Op{clientv3.OpGet(spec.HintPrimaryKey())}
for _, hk := range spec.HintBackupKeys() {
ops = append(ops, clientv3.OpGet(hk))
}
out.spec = spec
if out.txnResp, err = etcd.Txn(ctx).If().Then(ops...).Commit(); err != nil {
err = extendErr(err, "fetching ShardSpec.HintKeys")
return
}
for i := range out.txnResp.Responses {
var currHints recoverylog.FSMHints
if kvs := out.txnResp.Responses[i].GetResponseRange().Kvs; len(kvs) == 0 {
out.hints = append(out.hints, nil)
continue
} else if err = json.Unmarshal(kvs[0].Value, &currHints); err != nil {
err = extendErr(err, "unmarshal FSMHints")
} else if _, err = recoverylog.NewFSM(currHints); err != nil { // Validate hints.
err = extendErr(err, "validating FSMHints")
} else if currHints.Log != spec.RecoveryLog() {
err = errors.Errorf("recovered hints recovery log doesn't match ShardSpec.RecoveryLog (%s vs %s)",
currHints.Log, spec.RecoveryLog())
}
if err != nil {
return
}
out.hints = append(out.hints, &currHints)
}
return
}
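// exampleRecoveredHints is an illustrative sketch (not part of the original
// source): fetchHints returns one entry per hint key (index 0 is the primary
// key, the rest are the backups, in order) and is typically paired with
// pickFirstHints, as during log playback.
func exampleRecoveredHints(shard Shard, etcd *clientv3.Client) (recoverylog.FSMHints, error) {
	h, err := fetchHints(shard.Context(), shard.Spec(), etcd)
	if err != nil {
		return recoverylog.FSMHints{}, err
	}
	return pickFirstHints(h), nil
}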
// storeRecordedHints writes FSMHints into the primary hint key of the spec.
func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {
var val, err = json.Marshal(hints)
if err != nil {
return extendErr(err, "marshal FSMHints")
}
var asn = shard.Assignment()
_, err = etcd.Txn(shard.Context()).
// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.
// Compare CreateRevision to allow for a raced ReplicaState update.
If(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), "=", asn.Raw.CreateRevision)).
Then(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).
Commit()
if etcdErr, ok := err.(rpctypes.EtcdError); ok && etcdErr.Code() == codes.Unavailable {
// Recorded hints are advisory and can generally tolerate omitted
// updates. It's also annoying for temporary Etcd partitions to abort
// an otherwise-fine shard primary. So, log but allow shard processing
// to continue; we'll retry on the next hints flush interval.
log.WithFields(log.Fields{
"key": shard.Spec().HintPrimaryKey(),
"err": err,
}).Warn("failed to store recorded FSMHints (will retry)")
} else if err != nil {
return extendErr(err, "storing recorded FSMHints")
}
return nil
}
// storeRecoveredHints writes the FSMHints into the first backup hint key of the spec,
// rotating hints previously stored under that key to the second backup hint key,
// and so on as a single transaction.
func storeRecoveredHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) (err error) {
var (
spec = shard.Spec()
asn = shard.Assignment()
backups = shard.Spec().HintBackupKeys()
h fetchedHints
)
if h, err = fetchHints(shard.Context(), spec, etcd); err != nil {
return
}
	// |hints| is serialized and written to backups[0]. In the same txn,
	// rotate the current value at backups[0] => backups[1], and so on.
var val []byte
if val, err = json.Marshal(hints); err != nil {
return
}
var cmp []clientv3.Cmp
var ops []clientv3.Op
	// The txn responses returned from fetchHints are structured such that the first response will
	// be the primary response and the subsequent responses are the backup responses; this slice
	// represents just the backup responses.
var backupResponses = h.txnResp.Responses[1:]
// Verify our Assignment is still in effect (eg, we're still primary).
cmp = append(cmp, clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)),
"=", asn.Raw.CreateRevision))
for i := 0; i != len(backups) && val != nil; i++ {
ops = append(ops, clientv3.OpPut(backups[i], string(val)))
if kvs := backupResponses[i].GetResponseRange().Kvs; len(kvs) == 0 {
// Verify there is still no current key/value at this hints key slot.
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", 0))
val = nil
} else {
// Verify the key/value at this hints key slot is unchanged.
// Retain its value to rotate into the next slot (if one exists).
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", kvs[0].ModRevision))
val = kvs[0].Value
}
}
if _, err = etcd.Txn(shard.Context()).If(cmp...).Then(ops...).Commit(); err != nil {
err = extendErr(err, "storing recovered FSMHints")
}
return
}
// transaction models state and metrics used in the execution of a consumer transaction.
type transaction struct {
barrier *client.AsyncAppend // Write barrier of the txn at commit.
minDur, maxDur time.Duration // Minimum and maximum durations. Marked as -1 when elapsed.
msgCh <-chan message.Envelope // Message source. Nil'd upon reaching |maxDur|.
msgCount int // Number of messages batched into this transaction.
offsets map[pb.Journal]int64 // End (exclusive) journal offsets of the transaction.
doneCh <-chan struct{} // DoneCh of prior transaction barrier.
beganAt time.Time // Time at which transaction began.
stalledAt time.Time // Time at which processing stalled while waiting on IO.
flushedAt time.Time // Time at which flush began.
committedAt time.Time // Time at which commit began.
syncedAt time.Time // Time at which txn |barrier| resolved.
}
// txnTimer is a time.Timer which can be mocked within unit tests.
type txnTimer struct {
C <-chan time.Time
Reset func(time.Duration) bool
Stop func() bool
}
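// exampleTxnTimerStub is an illustrative sketch (not part of the original
// source) of how a unit test might construct a txnTimer around a
// hand-controlled channel, so ticks can be delivered to txnStep on demand; all
// names and the no-op Reset/Stop behaviour are assumptions.
func exampleTxnTimerStub() (txnTimer, chan time.Time) {
	var fakeC = make(chan time.Time, 1)
	return txnTimer{
		C:     fakeC,
		Reset: func(time.Duration) bool { return true },
		Stop:  func() bool { return true },
	}, fakeC
}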
// txnStep progresses a consumer transaction by a single step. If the transaction
// is complete, it returns done=true. Otherwise, txnStep should be called again
// to continue making progress on the transaction.
func txnStep(txn, prior *transaction, shard Shard, store Store, app Application, timer txnTimer) (done bool, err error) {
// If the minimum batching duration hasn't elapsed *or* the prior transaction
// barrier hasn't completed, continue performing blocking reads of messages.
if txn.msgCount == 0 || txn.minDur != -1 || txn.doneCh != nil {
select {
case msg := <-txn.msgCh:
if txn.msgCount == 0 {
if ba, ok := app.(BeginFinisher); ok {
// BeginTxn may block arbitrarily.
if err = ba.BeginTxn(shard, store); err != nil {
err = extendErr(err, "app.BeginTxn")
return
}
}
txn.beganAt = timeNow()
timer.Reset(txn.minDur)
}
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.minDur)) {
panic("unexpected tick")
}
txn.minDur = -1 // Mark as completed.
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
timer.Reset(txn.beganAt.Add(txn.maxDur).Sub(tick))
} else {
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
txn.stalledAt = timeNow() // We're stalled waiting for prior txn IO.
}
return
		case <-txn.doneCh:
prior.syncedAt = timeNow()
txn.doneCh = nil
return
		case <-shard.Context().Done():
err = shard.Context().Err()
return
}
panic("not reached")
}
// Continue reading messages so long as we do not block or reach |maxDur|.
select {
case msg := <-txn.msgCh:
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
panic("unexpected tick")
}
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
return
	case <-shard.Context().Done():
err = shard.Context().Err()
return
default:
// |msgCh| stalled. Fallthrough to complete the transaction.
}
if txn.flushedAt = timeNow(); txn.stalledAt.IsZero() {
txn.stalledAt = txn.flushedAt // We spent no time stalled.
}
if err = app.FinalizeTxn(shard, store); err != nil {
err = extendErr(err, "app.FinalizeTxn")
return
}
// Inject a strong write barrier which resolves only after pending writes
// to all journals have completed. We do this before store.Flush to ensure
// that writes driven by transaction messages have completed before we
// persist updated offsets which step past those messages.
store.Recorder().StrongBarrier()
if err = store.Flush(txn.offsets); err != nil {
err = extendErr(err, "store.Flush")
return
}
txn.barrier = store.Recorder().WeakBarrier()
txn.committedAt = timeNow()
// If the timer is still running, stop and drain it.
if txn.maxDur != -1 && !timer.Stop() {
<-timer.C
}
done = true
return
}
// recordMetrics of a fully completed transaction.
func recordMetrics(txn *transaction) {
metrics.GazetteConsumerTxCountTotal.Inc()
metrics.GazetteConsumerTxMessagesTotal.Add(float64(txn.msgCount))
metrics.GazetteConsumerTxSecondsTotal.Add(txn.syncedAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxConsumeSecondsTotal.Add(txn.stalledAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxStalledSecondsTotal.Add(txn.flushedAt.Sub(txn.stalledAt).Seconds())
metrics.GazetteConsumerTxFlushSecondsTotal.Add(txn.committedAt.Sub(txn.flushedAt).Seconds())
metrics.GazetteConsumerTxSyncSecondsTotal.Add(txn.syncedAt.Sub(txn.committedAt).Seconds())
}
func extendErr(err error, mFmt string, args ...interface{}) error |
var timeNow = time.Now
| {
if err == nil {
panic("expected error")
} else if err == context.Canceled || err == context.DeadlineExceeded {
return err
} else if _, ok := err.(interface{ StackTrace() errors.StackTrace }); ok {
// Avoid attaching another errors.StackTrace if one is already present.
return errors.WithMessage(err, fmt.Sprintf(mFmt, args...))
} else {
// Use Wrapf to simultaneously attach |mFmt| and the current stack trace.
return errors.Wrapf(err, mFmt, args...)
}
} | identifier_body |
lifecycle.go | package consumer
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.gazette.dev/core/broker/client"
pb "go.gazette.dev/core/broker/protocol"
pc "go.gazette.dev/core/consumer/protocol"
"go.gazette.dev/core/consumer/recoverylog"
"go.gazette.dev/core/labels"
"go.gazette.dev/core/message"
"go.gazette.dev/core/metrics"
"google.golang.org/grpc/codes"
)
// playLog fetches current shard hints and plays them back into a temporary directory using the Player.
func playLog(shard Shard, pl *recoverylog.Player, etcd *clientv3.Client) error {
if dir, err := ioutil.TempDir("", shard.Spec().Id.String()+"-"); err != nil {
return extendErr(err, "creating shard working directory")
} else if h, err := fetchHints(shard.Context(), shard.Spec(), etcd); err != nil {
return extendErr(err, "fetching FSM hints")
} else if logSpec, err := fetchJournalSpec(shard.Context(), pickFirstHints(h).Log, shard.JournalClient()); err != nil {
return extendErr(err, "fetching JournalSpec")
} else if ct := logSpec.LabelSet.ValueOf(labels.ContentType); ct != labels.ContentType_RecoveryLog {
return errors.Errorf("expected label %s value %s (got %v)", labels.ContentType, labels.ContentType_RecoveryLog, ct)
} else if err = pl.Play(shard.Context(), pickFirstHints(h), dir, shard.JournalClient()); err != nil {
return extendErr(err, "playing log %s", pickFirstHints(h).Log)
}
return nil
}
// completePlayback injects a new AuthorID into the log to complete playback,
// stores recovered hints, initializes an Application Store, and returns
// offsets at which journal consumption should continue.
func completePlayback(shard Shard, app Application, pl *recoverylog.Player,
etcd *clientv3.Client) (Store, map[pb.Journal]int64, error) {
var author, err = recoverylog.NewRandomAuthorID()
if err != nil {
return nil, nil, extendErr(err, "generating Author")
}
// Ask |pl| to inject a hand-off to our generated |author|, so that other
// tailing readers will apply our write operations over those of a previous
// recorder which may still be shutting down.
pl.InjectHandoff(author)
select {
case <-pl.Done():
// Pass.
case <-shard.Context().Done():
return nil, nil, shard.Context().Err()
}
if pl.FSM == nil {
return nil, nil, errors.Errorf("completePlayback aborting due to Play failure")
}
// We've completed log playback, and we're likely the most recent shard
// primary to do so. Snapshot our recovered hints. We'll sanity-check that
// we can open the recovered store & load offsets, and only then persist
// these recovered hints.
var recoveredHints = pl.FSM.BuildHints()
// Initialize the store.
var recorder = recoverylog.NewRecorder(pl.FSM, author, pl.Dir, shard.JournalClient())
var store Store
var offsets map[pb.Journal]int64
if store, err = app.NewStore(shard, pl.Dir, recorder); err != nil {
return nil, nil, extendErr(err, "initializing store")
} else if offsets, err = store.FetchJournalOffsets(); err != nil {
return nil, nil, extendErr(err, "fetching journal offsets from store")
} else if err = storeRecoveredHints(shard, recoveredHints, etcd); err != nil {
return nil, nil, extendErr(err, "storingRecoveredHints")
}
// Lower-bound each source to its ShardSpec.Source.MinOffset.
for _, src := range shard.Spec().Sources {
if offsets[src.Journal] < src.MinOffset {
offsets[src.Journal] = src.MinOffset
}
}
return store, offsets, nil
}
// pumpMessages reads and decodes messages from a Journal & offset into the provided channel.
func pumpMessages(shard Shard, app Application, journal pb.Journal, offset int64, msgCh chan<- message.Envelope) error {
var spec, err = fetchJournalSpec(shard.Context(), journal, shard.JournalClient())
if err != nil {
return extendErr(err, "fetching JournalSpec")
}
framing, err := message.FramingByContentType(spec.LabelSet.ValueOf(labels.ContentType))
if err != nil {
return extendErr(err, "determining framing (%s)", journal)
}
var rr = client.NewRetryReader(shard.Context(), shard.JournalClient(), pb.ReadRequest{
Journal: journal,
Offset: offset,
Block: true,
DoNotProxy: !shard.JournalClient().IsNoopRouter(),
})
var br = bufio.NewReader(rr)
for next := offset; ; offset = next {
var frame []byte
var msg message.Message
if frame, err = framing.Unpack(br); err != nil {
			// Swallow ErrNoProgress from our bufio.Reader. client.Reader returns
			// an empty read to allow for inspection of the ReadResponse message,
			// and client.RetryReader also surfaces these empty reads. A journal
			// with no active appends can eventually cause our bufio.Reader to
			// give up, though no error has occurred.
if errors.Cause(err) == io.ErrNoProgress {
continue
}
// ErrOffsetJump indicates the next byte of available content is at an
// offset larger than the one requested. This can happen if a range of
// content was deleted from the journal. Log a warning, but continue
// processing at the jumped-to offset.
if errors.Cause(err) == client.ErrOffsetJump {
log.WithFields(log.Fields{"journal": journal, "from": offset, "to": rr.Offset()}).
Warn("source journal offset jump")
next = rr.Offset()
continue
}
return extendErr(err, "unpacking frame (%s:%d)", spec.Name, offset)
}
next = rr.AdjustedOffset(br)
if msg, err = app.NewMessage(spec); err != nil {
return extendErr(err, "NewMessage (%s)", journal)
} else if err = framing.Unmarshal(frame, msg); err != nil {
log.WithFields(log.Fields{"journal": journal, "offset": offset, "err": err}).
Error("failed to unmarshal message")
continue
}
select {
case msgCh <- message.Envelope{
JournalSpec: spec,
Fragment: rr.Reader.Response.Fragment,
NextOffset: next,
Message: msg,
}: // Pass.
case <-shard.Context().Done():
return extendErr(shard.Context().Err(), "sending msg (%s:%d)", spec.Name, offset)
}
metrics.GazetteConsumerBytesConsumedTotal.Add(float64(next - offset))
}
}
// consumeMessages runs consumer transactions, consuming from the provided
// |msgCh| and, when notified by |hintsCh|, occasionally stores recorded FSMHints.
func consumeMessages(shard Shard, store Store, app Application, etcd *clientv3.Client,
msgCh <-chan message.Envelope, hintsCh <-chan time.Time) (err error) {
// Supply an idle timer for txnStep's use in timing transaction durations.
var realTimer = time.NewTimer(0)
if !realTimer.Stop() {
<-realTimer.C
}
var timer = txnTimer{
C: realTimer.C,
Reset: realTimer.Reset,
Stop: realTimer.Stop,
}
var txn, prior transaction
for {
select {
case <-hintsCh:
var hints recoverylog.FSMHints
if hints, err = store.Recorder().BuildHints(); err == nil {
err = storeRecordedHints(shard, hints, etcd)
}
if err != nil {
err = extendErr(err, "storeRecordedHints")
return
}
default:
// Pass.
}
var spec = shard.Spec()
txn.minDur, txn.maxDur = spec.MinTxnDuration, spec.MaxTxnDuration
txn.msgCh = msgCh
txn.offsets = make(map[pb.Journal]int64)
// Run the transaction until completion or error.
for done := false; !done && err == nil; done, err = txnStep(&txn, &prior, shard, store, app, timer) {
}
if err != nil {
err = extendErr(err, "txnStep")
}
if ba, ok := app.(BeginFinisher); ok && txn.msgCount != 0 {
if finishErr := ba.FinishTxn(shard, store, err); err == nil && finishErr != nil {
err = extendErr(finishErr, "FinishTxn")
}
}
if err != nil {
return
}
recordMetrics(&prior)
prior, txn = txn, transaction{doneCh: txn.barrier.Done()}
}
}
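// startShardSketch is a hypothetical example (not part of the original file)
// showing how the lifecycle helpers above are typically sequenced: play back
// the recovery log, complete playback to obtain a Store and offsets, then pump
// each source journal into a channel drained by consumeMessages. The channel
// size, the one-minute hints flush interval, and the dropped pump errors are
// assumptions of this sketch only.
func startShardSketch(shard Shard, app Application, pl *recoverylog.Player, etcd *clientv3.Client) error {
	if err := playLog(shard, pl, etcd); err != nil {
		return extendErr(err, "playLog")
	}
	store, offsets, err := completePlayback(shard, app, pl, etcd)
	if err != nil {
		return extendErr(err, "completePlayback")
	}
	var msgCh = make(chan message.Envelope, 128)
	for journal, offset := range offsets {
		// Each source journal is read concurrently; pump errors are elided in this sketch.
		go pumpMessages(shard, app, journal, offset, msgCh)
	}
	var hintsTicker = time.NewTicker(time.Minute)
	defer hintsTicker.Stop()
	return consumeMessages(shard, store, app, etcd, msgCh, hintsTicker.C)
}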
// fetchJournalSpec retrieves the current JournalSpec.
func fetchJournalSpec(ctx context.Context, name pb.Journal, journals pb.JournalClient) (spec *pb.JournalSpec, err error) {
var lr *pb.ListResponse
lr, err = client.ListAllJournals(ctx, journals, pb.ListRequest{
Selector: pb.LabelSelector{
Include: pb.LabelSet{Labels: []pb.Label{{Name: "name", Value: name.String()}}},
},
})
if err == nil && len(lr.Journals) == 0 {
err = errors.Errorf("named journal does not exist (%s)", name)
}
if err == nil {
spec = &lr.Journals[0].Spec
}
return
}
type fetchedHints struct {
spec *pc.ShardSpec
txnResp *clientv3.TxnResponse
hints []*recoverylog.FSMHints
}
// pickFirstHints returns the first available hints from |f|. If no primary
// hints are available, the most recent backup hints are returned. If no hints
// are available at all, an empty set of hints is returned.
func pickFirstHints(f fetchedHints) recoverylog.FSMHints {
for _, currHints := range f.hints {
if currHints == nil {
continue
}
return *currHints
}
return recoverylog.FSMHints{Log: f.spec.RecoveryLog()}
}
// fetchHints retrieves and decodes all FSMHints for the ShardSpec.
// Nil values will be returned where hint values have not been written. It also
// returns a TxnResponse holding each of the hints values, which can be used for
// transactional updates of hints.
func fetchHints(ctx context.Context, spec *pc.ShardSpec, etcd *clientv3.Client) (out fetchedHints, err error) {
var ops = []clientv3.Op{clientv3.OpGet(spec.HintPrimaryKey())}
for _, hk := range spec.HintBackupKeys() {
ops = append(ops, clientv3.OpGet(hk))
}
out.spec = spec
if out.txnResp, err = etcd.Txn(ctx).If().Then(ops...).Commit(); err != nil |
for i := range out.txnResp.Responses {
var currHints recoverylog.FSMHints
if kvs := out.txnResp.Responses[i].GetResponseRange().Kvs; len(kvs) == 0 {
out.hints = append(out.hints, nil)
continue
} else if err = json.Unmarshal(kvs[0].Value, &currHints); err != nil {
err = extendErr(err, "unmarshal FSMHints")
} else if _, err = recoverylog.NewFSM(currHints); err != nil { // Validate hints.
err = extendErr(err, "validating FSMHints")
} else if currHints.Log != spec.RecoveryLog() {
err = errors.Errorf("recovered hints recovery log doesn't match ShardSpec.RecoveryLog (%s vs %s)",
currHints.Log, spec.RecoveryLog())
}
if err != nil {
return
}
out.hints = append(out.hints, &currHints)
}
return
}
// storeRecordedHints writes FSMHints into the primary hint key of the spec.
func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {
var val, err = json.Marshal(hints)
if err != nil {
return extendErr(err, "marshal FSMHints")
}
var asn = shard.Assignment()
_, err = etcd.Txn(shard.Context()).
// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.
// Compare CreateRevision to allow for a raced ReplicaState update.
If(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), "=", asn.Raw.CreateRevision)).
Then(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).
Commit()
if etcdErr, ok := err.(rpctypes.EtcdError); ok && etcdErr.Code() == codes.Unavailable {
// Recorded hints are advisory and can generally tolerate omitted
// updates. It's also annoying for temporary Etcd partitions to abort
// an otherwise-fine shard primary. So, log but allow shard processing
// to continue; we'll retry on the next hints flush interval.
log.WithFields(log.Fields{
"key": shard.Spec().HintPrimaryKey(),
"err": err,
}).Warn("failed to store recorded FSMHints (will retry)")
} else if err != nil {
return extendErr(err, "storing recorded FSMHints")
}
return nil
}
// storeRecoveredHints writes the FSMHints into the first backup hint key of the spec,
// rotating hints previously stored under that key to the second backup hint key,
// and so on as a single transaction.
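// For example (illustrative): with backup keys [b0, b1], |hints| is written to
// b0, the prior value of b0 rotates to b1, and the prior value of b1 (if any)
// is dropped.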
func storeRecoveredHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) (err error) {
var (
spec = shard.Spec()
asn = shard.Assignment()
backups = shard.Spec().HintBackupKeys()
h fetchedHints
)
if h, err = fetchHints(shard.Context(), spec, etcd); err != nil {
return
}
	// |hints| is serialized and written to backups[0]. In the same txn,
	// rotate the current value at backups[0] => backups[1], and so on.
var val []byte
if val, err = json.Marshal(hints); err != nil {
return
}
var cmp []clientv3.Cmp
var ops []clientv3.Op
	// The txn responses returned from fetchHints are structured such that the first response is
	// the primary hints response and the subsequent responses are the backup responses; this
	// slice holds just the backup responses.
var backupResponses = h.txnResp.Responses[1:]
// Verify our Assignment is still in effect (eg, we're still primary).
cmp = append(cmp, clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)),
"=", asn.Raw.CreateRevision))
for i := 0; i != len(backups) && val != nil; i++ {
ops = append(ops, clientv3.OpPut(backups[i], string(val)))
if kvs := backupResponses[i].GetResponseRange().Kvs; len(kvs) == 0 {
// Verify there is still no current key/value at this hints key slot.
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", 0))
val = nil
} else {
// Verify the key/value at this hints key slot is unchanged.
// Retain its value to rotate into the next slot (if one exists).
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", kvs[0].ModRevision))
val = kvs[0].Value
}
}
if _, err = etcd.Txn(shard.Context()).If(cmp...).Then(ops...).Commit(); err != nil {
err = extendErr(err, "storing recovered FSMHints")
}
return
}
// transaction models state and metrics used in the execution of a consumer transaction.
type transaction struct {
barrier *client.AsyncAppend // Write barrier of the txn at commit.
minDur, maxDur time.Duration // Minimum and maximum durations. Marked as -1 when elapsed.
msgCh <-chan message.Envelope // Message source. Nil'd upon reaching |maxDur|.
msgCount int // Number of messages batched into this transaction.
offsets map[pb.Journal]int64 // End (exclusive) journal offsets of the transaction.
doneCh <-chan struct{} // DoneCh of prior transaction barrier.
beganAt time.Time // Time at which transaction began.
stalledAt time.Time // Time at which processing stalled while waiting on IO.
flushedAt time.Time // Time at which flush began.
committedAt time.Time // Time at which commit began.
syncedAt time.Time // Time at which txn |barrier| resolved.
}
// txnTimer is a time.Timer which can be mocked within unit tests.
type txnTimer struct {
C <-chan time.Time
Reset func(time.Duration) bool
Stop func() bool
}
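// A minimal sketch (assumed, for illustration only) of mocking txnTimer in a
// unit test, injecting ticks through a channel that the test controls:
//
//	tickCh := make(chan time.Time, 1)
//	mocked := txnTimer{
//		C:     tickCh,
//		Reset: func(time.Duration) bool { return true },
//		Stop:  func() bool { return true },
//	}
//	tickCh <- time.Now() // Drive txnStep's timer case.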
// txnStep progresses a consumer transaction by a single step. If the transaction
// is complete, it returns done=true. Otherwise, txnStep should be called again
// to continue making progress on the transaction.
func txnStep(txn, prior *transaction, shard Shard, store Store, app Application, timer txnTimer) (done bool, err error) {
// If the minimum batching duration hasn't elapsed *or* the prior transaction
// barrier hasn't completed, continue performing blocking reads of messages.
if txn.msgCount == 0 || txn.minDur != -1 || txn.doneCh != nil {
select {
case msg := <-txn.msgCh:
if txn.msgCount == 0 {
if ba, ok := app.(BeginFinisher); ok {
// BeginTxn may block arbitrarily.
if err = ba.BeginTxn(shard, store); err != nil {
err = extendErr(err, "app.BeginTxn")
return
}
}
txn.beganAt = timeNow()
timer.Reset(txn.minDur)
}
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.minDur)) {
panic("unexpected tick")
}
txn.minDur = -1 // Mark as completed.
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
timer.Reset(txn.beganAt.Add(txn.maxDur).Sub(tick))
} else {
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
txn.stalledAt = timeNow() // We're stalled waiting for prior txn IO.
}
return
		case <-txn.doneCh:
prior.syncedAt = timeNow()
txn.doneCh = nil
return
		case <-shard.Context().Done():
err = shard.Context().Err()
return
}
panic("not reached")
}
// Continue reading messages so long as we do not block or reach |maxDur|.
select {
case msg := <-txn.msgCh:
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
panic("unexpected tick")
}
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
return
	case <-shard.Context().Done():
err = shard.Context().Err()
return
default:
// |msgCh| stalled. Fallthrough to complete the transaction.
}
if txn.flushedAt = timeNow(); txn.stalledAt.IsZero() {
txn.stalledAt = txn.flushedAt // We spent no time stalled.
}
if err = app.FinalizeTxn(shard, store); err != nil {
err = extendErr(err, "app.FinalizeTxn")
return
}
// Inject a strong write barrier which resolves only after pending writes
// to all journals have completed. We do this before store.Flush to ensure
// that writes driven by transaction messages have completed before we
// persist updated offsets which step past those messages.
store.Recorder().StrongBarrier()
if err = store.Flush(txn.offsets); err != nil {
err = extendErr(err, "store.Flush")
return
}
txn.barrier = store.Recorder().WeakBarrier()
txn.committedAt = timeNow()
// If the timer is still running, stop and drain it.
if txn.maxDur != -1 && !timer.Stop() {
<-timer.C
}
done = true
return
}
// recordMetrics of a fully completed transaction.
func recordMetrics(txn *transaction) {
metrics.GazetteConsumerTxCountTotal.Inc()
metrics.GazetteConsumerTxMessagesTotal.Add(float64(txn.msgCount))
metrics.GazetteConsumerTxSecondsTotal.Add(txn.syncedAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxConsumeSecondsTotal.Add(txn.stalledAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxStalledSecondsTotal.Add(txn.flushedAt.Sub(txn.stalledAt).Seconds())
metrics.GazetteConsumerTxFlushSecondsTotal.Add(txn.committedAt.Sub(txn.flushedAt).Seconds())
metrics.GazetteConsumerTxSyncSecondsTotal.Add(txn.syncedAt.Sub(txn.committedAt).Seconds())
}
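// extendErr annotates |err| with the formatted message |mFmt|. Context
// cancellation and deadline errors are passed through unchanged, and a stack
// trace is attached only if |err| does not already carry one.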
func extendErr(err error, mFmt string, args ...interface{}) error {
if err == nil {
panic("expected error")
} else if err == context.Canceled || err == context.DeadlineExceeded {
return err
} else if _, ok := err.(interface{ StackTrace() errors.StackTrace }); ok {
// Avoid attaching another errors.StackTrace if one is already present.
return errors.WithMessage(err, fmt.Sprintf(mFmt, args...))
} else {
// Use Wrapf to simultaneously attach |mFmt| and the current stack trace.
return errors.Wrapf(err, mFmt, args...)
}
}
var timeNow = time.Now
| {
err = extendErr(err, "fetching ShardSpec.HintKeys")
return
} | conditional_block |
lifecycle.go | package consumer
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.gazette.dev/core/broker/client"
pb "go.gazette.dev/core/broker/protocol"
pc "go.gazette.dev/core/consumer/protocol"
"go.gazette.dev/core/consumer/recoverylog"
"go.gazette.dev/core/labels"
"go.gazette.dev/core/message"
"go.gazette.dev/core/metrics"
"google.golang.org/grpc/codes"
)
// playLog fetches current shard hints and plays them back into a temporary directory using the Player.
func playLog(shard Shard, pl *recoverylog.Player, etcd *clientv3.Client) error {
if dir, err := ioutil.TempDir("", shard.Spec().Id.String()+"-"); err != nil {
return extendErr(err, "creating shard working directory")
} else if h, err := fetchHints(shard.Context(), shard.Spec(), etcd); err != nil {
return extendErr(err, "fetching FSM hints")
} else if logSpec, err := fetchJournalSpec(shard.Context(), pickFirstHints(h).Log, shard.JournalClient()); err != nil {
return extendErr(err, "fetching JournalSpec")
} else if ct := logSpec.LabelSet.ValueOf(labels.ContentType); ct != labels.ContentType_RecoveryLog {
return errors.Errorf("expected label %s value %s (got %v)", labels.ContentType, labels.ContentType_RecoveryLog, ct)
} else if err = pl.Play(shard.Context(), pickFirstHints(h), dir, shard.JournalClient()); err != nil {
return extendErr(err, "playing log %s", pickFirstHints(h).Log)
}
return nil
}
// completePlayback injects a new AuthorID into the log to complete playback,
// stores recovered hints, initializes an Application Store, and returns
// offsets at which journal consumption should continue.
func completePlayback(shard Shard, app Application, pl *recoverylog.Player,
etcd *clientv3.Client) (Store, map[pb.Journal]int64, error) {
var author, err = recoverylog.NewRandomAuthorID()
if err != nil {
return nil, nil, extendErr(err, "generating Author")
}
// Ask |pl| to inject a hand-off to our generated |author|, so that other
// tailing readers will apply our write operations over those of a previous
// recorder which may still be shutting down.
pl.InjectHandoff(author)
select {
case <-pl.Done():
// Pass.
case <-shard.Context().Done():
return nil, nil, shard.Context().Err()
}
if pl.FSM == nil {
return nil, nil, errors.Errorf("completePlayback aborting due to Play failure")
}
// We've completed log playback, and we're likely the most recent shard
// primary to do so. Snapshot our recovered hints. We'll sanity-check that
// we can open the recovered store & load offsets, and only then persist
// these recovered hints.
var recoveredHints = pl.FSM.BuildHints()
// Initialize the store.
var recorder = recoverylog.NewRecorder(pl.FSM, author, pl.Dir, shard.JournalClient())
var store Store
var offsets map[pb.Journal]int64
if store, err = app.NewStore(shard, pl.Dir, recorder); err != nil {
return nil, nil, extendErr(err, "initializing store")
} else if offsets, err = store.FetchJournalOffsets(); err != nil {
return nil, nil, extendErr(err, "fetching journal offsets from store")
} else if err = storeRecoveredHints(shard, recoveredHints, etcd); err != nil {
return nil, nil, extendErr(err, "storingRecoveredHints")
}
// Lower-bound each source to its ShardSpec.Source.MinOffset.
for _, src := range shard.Spec().Sources {
if offsets[src.Journal] < src.MinOffset {
offsets[src.Journal] = src.MinOffset
}
}
return store, offsets, nil
}
// pumpMessages reads and decodes messages from a Journal & offset into the provided channel.
func pumpMessages(shard Shard, app Application, journal pb.Journal, offset int64, msgCh chan<- message.Envelope) error {
var spec, err = fetchJournalSpec(shard.Context(), journal, shard.JournalClient())
if err != nil {
return extendErr(err, "fetching JournalSpec")
}
framing, err := message.FramingByContentType(spec.LabelSet.ValueOf(labels.ContentType))
if err != nil {
return extendErr(err, "determining framing (%s)", journal)
}
var rr = client.NewRetryReader(shard.Context(), shard.JournalClient(), pb.ReadRequest{
Journal: journal,
Offset: offset,
Block: true,
DoNotProxy: !shard.JournalClient().IsNoopRouter(),
})
var br = bufio.NewReader(rr)
for next := offset; ; offset = next {
var frame []byte
var msg message.Message
if frame, err = framing.Unpack(br); err != nil {
			// Swallow ErrNoProgress from our bufio.Reader. client.Reader returns
			// an empty read to allow for inspection of the ReadResponse message,
			// and client.RetryReader also surfaces these empty reads. A journal
			// with no active appends can eventually cause our bufio.Reader to
			// give up, though no error has occurred.
if errors.Cause(err) == io.ErrNoProgress {
continue
}
// ErrOffsetJump indicates the next byte of available content is at an
// offset larger than the one requested. This can happen if a range of
// content was deleted from the journal. Log a warning, but continue
// processing at the jumped-to offset.
if errors.Cause(err) == client.ErrOffsetJump {
log.WithFields(log.Fields{"journal": journal, "from": offset, "to": rr.Offset()}).
Warn("source journal offset jump")
next = rr.Offset()
continue
}
return extendErr(err, "unpacking frame (%s:%d)", spec.Name, offset)
}
next = rr.AdjustedOffset(br)
if msg, err = app.NewMessage(spec); err != nil {
return extendErr(err, "NewMessage (%s)", journal)
} else if err = framing.Unmarshal(frame, msg); err != nil {
log.WithFields(log.Fields{"journal": journal, "offset": offset, "err": err}).
Error("failed to unmarshal message")
continue
}
select {
case msgCh <- message.Envelope{
JournalSpec: spec,
Fragment: rr.Reader.Response.Fragment,
NextOffset: next,
Message: msg,
}: // Pass.
case <-shard.Context().Done():
return extendErr(shard.Context().Err(), "sending msg (%s:%d)", spec.Name, offset)
}
metrics.GazetteConsumerBytesConsumedTotal.Add(float64(next - offset))
}
}
// consumeMessages runs consumer transactions, consuming from the provided
// |msgCh| and, when notified by |hintsCh|, occasionally stores recorded FSMHints.
func consumeMessages(shard Shard, store Store, app Application, etcd *clientv3.Client,
msgCh <-chan message.Envelope, hintsCh <-chan time.Time) (err error) {
// Supply an idle timer for txnStep's use in timing transaction durations.
var realTimer = time.NewTimer(0)
if !realTimer.Stop() {
<-realTimer.C
}
var timer = txnTimer{
C: realTimer.C,
Reset: realTimer.Reset,
Stop: realTimer.Stop,
}
var txn, prior transaction
for {
select {
case <-hintsCh:
var hints recoverylog.FSMHints
if hints, err = store.Recorder().BuildHints(); err == nil {
err = storeRecordedHints(shard, hints, etcd)
}
if err != nil {
err = extendErr(err, "storeRecordedHints")
return
}
default:
// Pass.
}
var spec = shard.Spec()
txn.minDur, txn.maxDur = spec.MinTxnDuration, spec.MaxTxnDuration
txn.msgCh = msgCh
txn.offsets = make(map[pb.Journal]int64)
// Run the transaction until completion or error.
for done := false; !done && err == nil; done, err = txnStep(&txn, &prior, shard, store, app, timer) {
}
if err != nil {
err = extendErr(err, "txnStep")
}
if ba, ok := app.(BeginFinisher); ok && txn.msgCount != 0 {
if finishErr := ba.FinishTxn(shard, store, err); err == nil && finishErr != nil {
err = extendErr(finishErr, "FinishTxn")
}
}
if err != nil {
return
}
recordMetrics(&prior)
prior, txn = txn, transaction{doneCh: txn.barrier.Done()}
}
}
// fetchJournalSpec retrieves the current JournalSpec.
func fetchJournalSpec(ctx context.Context, name pb.Journal, journals pb.JournalClient) (spec *pb.JournalSpec, err error) {
var lr *pb.ListResponse
lr, err = client.ListAllJournals(ctx, journals, pb.ListRequest{
Selector: pb.LabelSelector{
Include: pb.LabelSet{Labels: []pb.Label{{Name: "name", Value: name.String()}}},
},
})
if err == nil && len(lr.Journals) == 0 {
err = errors.Errorf("named journal does not exist (%s)", name)
}
if err == nil {
spec = &lr.Journals[0].Spec
}
return
}
type fetchedHints struct {
spec *pc.ShardSpec
txnResp *clientv3.TxnResponse
hints []*recoverylog.FSMHints
}
// pickFirstHints returns the first available hints from |f|. If no primary
// hints are available, the most recent backup hints are returned. If no hints
// are available at all, an empty set of hints is returned.
func pickFirstHints(f fetchedHints) recoverylog.FSMHints {
for _, currHints := range f.hints {
if currHints == nil {
continue
}
return *currHints
}
return recoverylog.FSMHints{Log: f.spec.RecoveryLog()}
}
// fetchHints retrieves and decodes all FSMHints for the ShardSpec.
// Nil values will be returned where hint values have not been written. It also
// returns a TxnResponse holding each of the hints values, which can be used for
// transactional updates of hints.
func fetchHints(ctx context.Context, spec *pc.ShardSpec, etcd *clientv3.Client) (out fetchedHints, err error) {
var ops = []clientv3.Op{clientv3.OpGet(spec.HintPrimaryKey())}
for _, hk := range spec.HintBackupKeys() {
ops = append(ops, clientv3.OpGet(hk))
}
out.spec = spec
if out.txnResp, err = etcd.Txn(ctx).If().Then(ops...).Commit(); err != nil {
err = extendErr(err, "fetching ShardSpec.HintKeys")
return
}
for i := range out.txnResp.Responses {
var currHints recoverylog.FSMHints
if kvs := out.txnResp.Responses[i].GetResponseRange().Kvs; len(kvs) == 0 {
out.hints = append(out.hints, nil)
continue
} else if err = json.Unmarshal(kvs[0].Value, &currHints); err != nil {
err = extendErr(err, "unmarshal FSMHints")
} else if _, err = recoverylog.NewFSM(currHints); err != nil { // Validate hints.
err = extendErr(err, "validating FSMHints")
} else if currHints.Log != spec.RecoveryLog() {
err = errors.Errorf("recovered hints recovery log doesn't match ShardSpec.RecoveryLog (%s vs %s)",
currHints.Log, spec.RecoveryLog())
}
if err != nil {
return
}
out.hints = append(out.hints, &currHints)
}
return
}
// storeRecordedHints writes FSMHints into the primary hint key of the spec.
func storeRecordedHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) error {
var val, err = json.Marshal(hints)
if err != nil {
return extendErr(err, "marshal FSMHints")
}
var asn = shard.Assignment()
_, err = etcd.Txn(shard.Context()).
// Verify our Assignment is still in effect (eg, we're still primary), then write |hints| to HintPrimaryKey.
// Compare CreateRevision to allow for a raced ReplicaState update.
If(clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)), "=", asn.Raw.CreateRevision)).
Then(clientv3.OpPut(shard.Spec().HintPrimaryKey(), string(val))).
Commit()
if etcdErr, ok := err.(rpctypes.EtcdError); ok && etcdErr.Code() == codes.Unavailable {
// Recorded hints are advisory and can generally tolerate omitted
// updates. It's also annoying for temporary Etcd partitions to abort
// an otherwise-fine shard primary. So, log but allow shard processing
// to continue; we'll retry on the next hints flush interval.
log.WithFields(log.Fields{
"key": shard.Spec().HintPrimaryKey(),
"err": err,
}).Warn("failed to store recorded FSMHints (will retry)")
} else if err != nil {
return extendErr(err, "storing recorded FSMHints")
}
return nil
}
// storeRecoveredHints writes the FSMHints into the first backup hint key of the spec,
// rotating hints previously stored under that key to the second backup hint key,
// and so on as a single transaction.
func storeRecoveredHints(shard Shard, hints recoverylog.FSMHints, etcd *clientv3.Client) (err error) {
var (
spec = shard.Spec()
asn = shard.Assignment()
backups = shard.Spec().HintBackupKeys()
h fetchedHints
)
if h, err = fetchHints(shard.Context(), spec, etcd); err != nil {
return
}
	// |hints| is serialized and written to backups[0]. In the same txn,
	// rotate the current value at backups[0] => backups[1], and so on.
var val []byte
if val, err = json.Marshal(hints); err != nil {
return
}
var cmp []clientv3.Cmp
var ops []clientv3.Op
	// The txn responses returned from fetchHints are structured such that the first response is
	// the primary hints response and the subsequent responses are the backup responses; this
	// slice holds just the backup responses.
var backupResponses = h.txnResp.Responses[1:]
// Verify our Assignment is still in effect (eg, we're still primary).
cmp = append(cmp, clientv3.Compare(clientv3.CreateRevision(string(asn.Raw.Key)),
"=", asn.Raw.CreateRevision))
for i := 0; i != len(backups) && val != nil; i++ {
ops = append(ops, clientv3.OpPut(backups[i], string(val)))
if kvs := backupResponses[i].GetResponseRange().Kvs; len(kvs) == 0 {
// Verify there is still no current key/value at this hints key slot.
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", 0))
val = nil
} else {
// Verify the key/value at this hints key slot is unchanged.
// Retain its value to rotate into the next slot (if one exists).
cmp = append(cmp, clientv3.Compare(clientv3.ModRevision(backups[i]), "=", kvs[0].ModRevision))
val = kvs[0].Value
}
}
if _, err = etcd.Txn(shard.Context()).If(cmp...).Then(ops...).Commit(); err != nil {
err = extendErr(err, "storing recovered FSMHints")
}
return
}
// transaction models state and metrics used in the execution of a consumer transaction.
type transaction struct {
barrier *client.AsyncAppend // Write barrier of the txn at commit.
minDur, maxDur time.Duration // Minimum and maximum durations. Marked as -1 when elapsed.
msgCh <-chan message.Envelope // Message source. Nil'd upon reaching |maxDur|.
msgCount int // Number of messages batched into this transaction.
offsets map[pb.Journal]int64 // End (exclusive) journal offsets of the transaction.
doneCh <-chan struct{} // DoneCh of prior transaction barrier.
beganAt time.Time // Time at which transaction began.
stalledAt time.Time // Time at which processing stalled while waiting on IO.
flushedAt time.Time // Time at which flush began.
committedAt time.Time // Time at which commit began.
syncedAt time.Time // Time at which txn |barrier| resolved.
}
// txnTimer is a time.Timer which can be mocked within unit tests.
type txnTimer struct {
C <-chan time.Time
Reset func(time.Duration) bool
Stop func() bool
}
// txnStep progresses a consumer transaction by a single step. If the transaction
// is complete, it returns done=true. Otherwise, txnStep should be called again
// to continue making progress on the transaction.
func txnStep(txn, prior *transaction, shard Shard, store Store, app Application, timer txnTimer) (done bool, err error) {
// If the minimum batching duration hasn't elapsed *or* the prior transaction
// barrier hasn't completed, continue performing blocking reads of messages.
if txn.msgCount == 0 || txn.minDur != -1 || txn.doneCh != nil {
select {
case msg := <-txn.msgCh:
if txn.msgCount == 0 {
if ba, ok := app.(BeginFinisher); ok {
// BeginTxn may block arbitrarily.
if err = ba.BeginTxn(shard, store); err != nil {
err = extendErr(err, "app.BeginTxn")
return
}
}
txn.beganAt = timeNow()
timer.Reset(txn.minDur)
}
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.minDur)) {
panic("unexpected tick")
}
txn.minDur = -1 // Mark as completed.
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
timer.Reset(txn.beganAt.Add(txn.maxDur).Sub(tick))
} else {
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
txn.stalledAt = timeNow() // We're stalled waiting for prior txn IO.
}
return
		case <-txn.doneCh:
prior.syncedAt = timeNow()
txn.doneCh = nil
return
		case <-shard.Context().Done():
err = shard.Context().Err()
return
}
panic("not reached")
}
// Continue reading messages so long as we do not block or reach |maxDur|.
select {
case msg := <-txn.msgCh:
txn.msgCount++
txn.offsets[msg.JournalSpec.Name] = msg.NextOffset
if err = app.ConsumeMessage(shard, store, msg); err != nil {
err = extendErr(err, "app.ConsumeMessage")
}
return
case tick := <-timer.C:
if tick.Before(txn.beganAt.Add(txn.maxDur)) {
panic("unexpected tick")
}
txn.maxDur = -1 // Mark as completed.
txn.msgCh = nil // Stop reading messages.
return
	case <-shard.Context().Done():
err = shard.Context().Err()
return
default:
// |msgCh| stalled. Fallthrough to complete the transaction.
}
if txn.flushedAt = timeNow(); txn.stalledAt.IsZero() {
txn.stalledAt = txn.flushedAt // We spent no time stalled.
}
if err = app.FinalizeTxn(shard, store); err != nil {
err = extendErr(err, "app.FinalizeTxn")
return
}
// Inject a strong write barrier which resolves only after pending writes
// to all journals have completed. We do this before store.Flush to ensure
// that writes driven by transaction messages have completed before we
// persist updated offsets which step past those messages.
store.Recorder().StrongBarrier()
if err = store.Flush(txn.offsets); err != nil {
err = extendErr(err, "store.Flush")
return
}
txn.barrier = store.Recorder().WeakBarrier()
txn.committedAt = timeNow()
// If the timer is still running, stop and drain it.
if txn.maxDur != -1 && !timer.Stop() {
<-timer.C
}
done = true
return
}
// recordMetrics of a fully completed transaction.
func recordMetrics(txn *transaction) {
metrics.GazetteConsumerTxCountTotal.Inc()
metrics.GazetteConsumerTxMessagesTotal.Add(float64(txn.msgCount))
metrics.GazetteConsumerTxSecondsTotal.Add(txn.syncedAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxConsumeSecondsTotal.Add(txn.stalledAt.Sub(txn.beganAt).Seconds())
metrics.GazetteConsumerTxStalledSecondsTotal.Add(txn.flushedAt.Sub(txn.stalledAt).Seconds())
metrics.GazetteConsumerTxFlushSecondsTotal.Add(txn.committedAt.Sub(txn.flushedAt).Seconds())
metrics.GazetteConsumerTxSyncSecondsTotal.Add(txn.syncedAt.Sub(txn.committedAt).Seconds())
}
func | (err error, mFmt string, args ...interface{}) error {
if err == nil {
panic("expected error")
} else if err == context.Canceled || err == context.DeadlineExceeded {
return err
} else if _, ok := err.(interface{ StackTrace() errors.StackTrace }); ok {
// Avoid attaching another errors.StackTrace if one is already present.
return errors.WithMessage(err, fmt.Sprintf(mFmt, args...))
} else {
// Use Wrapf to simultaneously attach |mFmt| and the current stack trace.
return errors.Wrapf(err, mFmt, args...)
}
}
var timeNow = time.Now
| extendErr | identifier_name |
incident-sk.ts | /**
* @module incident-sk
* @description <h2><code>incident-sk</code></h2>
*
* <p>
* Displays a single Incident.
* </p>
*
* @attr minimized {boolean} If not set then the incident is displayed in expanded
* mode, otherwise it is displayed in compact mode.
*
* @attr params {boolean} If set then the incident params are displayed, only
* applicable if minimized is true.
*
* @evt add-note Sent when the user adds a note to an incident.
* The detail includes the text of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* text: "blah blah blah",
* }
* </pre>
*
* @evt del-note Sent when the user deletes a note on an incident.
* The detail includes the index of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
 * @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
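 * A minimal usage sketch (assumed; not part of this element) of wiring these
 * events from a parent element:
 *
 * <pre>
 *   const ele = document.createElement('incident-sk') as IncidentSk;
 *   ele.addEventListener('add-note', (e: Event) => {
 *     const { key, text } = (e as CustomEvent).detail;
 *     // e.g. POST {key, text} to the alert-manager backend here.
 *   });
 *   document.body.appendChild(ele);
 * </pre>
 *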
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set incident_silences(val: Silence[]) {
    this.silences = val;
    this._render();
}
  /** @prop incident_has_recently_expired_silence Whether a matching silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) {
this.recently_expired_silence = val;
this._render();
}
}
  /** @prop incident_flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
}
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
    // Keep only silences whose param sets match this incident; when
    // displaySilencesWithComments is set, also drop silences that have no notes.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
.then((json: RecentIncidentsResponse) => {
const incidents = json.incidents || [];
this.incident_flaky = json.flaky;
this.incident_has_recently_expired_silence =
json.recently_expired_silence;
return incidents.map( | }
private toggleSilencesWithComments(e: Event): void {
// This prevents a double event from happening.
e.preventDefault();
this.displaySilencesWithComments = !this.displaySilencesWithComments;
this._render();
}
private displayRecentlyExpired(
recentlyExpiredSilence: boolean
): TemplateResult {
if (recentlyExpiredSilence) {
return html`<alarm-off-icon-sk
title="This alert has a recently expired silence"></alarm-off-icon-sk>`;
}
return html``;
}
private displayFlakiness(flaky: boolean): TemplateResult {
if (flaky) {
return html`<thumbs-up-down-icon-sk
title="This alert is possibly flaky"></thumbs-up-down-icon-sk>`;
}
return html``;
}
private take(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('take', { detail: detail, bubbles: true })
);
}
private assignToOwner(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign-to-owner', { detail: detail, bubbles: true })
);
}
private assign(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign', { detail: detail, bubbles: true })
);
}
private addNote(): void {
const textarea = $$('textarea', this) as HTMLInputElement;
const detail = {
key: this.state.key,
text: textarea.value,
};
this.dispatchEvent(
new CustomEvent('add-note', { detail: detail, bubbles: true })
);
textarea.value = '';
}
private _render(): void {
if (!this.state) {
return;
}
render(IncidentSk.template(this), this, { eventContext: this });
}
}
define('incident-sk', IncidentSk); | (i: Incident) =>
html`<incident-sk .incident_state=${i} minimized></incident-sk>`
);
})
.catch(errorMessage); | random_line_split |
incident-sk.ts | /**
* @module incident-sk
* @description <h2><code>incident-sk</code></h2>
*
* <p>
* Displays a single Incident.
* </p>
*
* @attr minimized {boolean} If not set then the incident is displayed in expanded
* mode, otherwise it is displayed in compact mode.
*
* @attr params {boolean} If set then the incident params are displayed, only
* applicable if minimized is true.
*
* @evt add-note Sent when the user adds a note to an incident.
* The detail includes the text of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* text: "blah blah blah",
* }
* </pre>
*
* @evt del-note Sent when the user deletes a note on an incident.
* The detail includes the index of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
 * @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set | (val: Silence[]) {
    this.silences = val;
    this._render();
}
  /** @prop incident_has_recently_expired_silence Whether a matching silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) {
this.recently_expired_silence = val;
this._render();
}
}
  /** @prop incident_flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
}
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
    // Keep only silences whose param sets match this incident; when
    // displaySilencesWithComments is set, also drop silences that have no notes.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
.then((json: RecentIncidentsResponse) => {
const incidents = json.incidents || [];
this.incident_flaky = json.flaky;
this.incident_has_recently_expired_silence =
json.recently_expired_silence;
return incidents.map(
(i: Incident) =>
html`<incident-sk .incident_state=${i} minimized></incident-sk>`
);
})
.catch(errorMessage);
}
private toggleSilencesWithComments(e: Event): void {
// This prevents a double event from happening.
e.preventDefault();
this.displaySilencesWithComments = !this.displaySilencesWithComments;
this._render();
}
private displayRecentlyExpired(
recentlyExpiredSilence: boolean
): TemplateResult {
if (recentlyExpiredSilence) {
return html`<alarm-off-icon-sk
title="This alert has a recently expired silence"></alarm-off-icon-sk>`;
}
return html``;
}
private displayFlakiness(flaky: boolean): TemplateResult {
if (flaky) {
return html`<thumbs-up-down-icon-sk
title="This alert is possibly flaky"></thumbs-up-down-icon-sk>`;
}
return html``;
}
private take(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('take', { detail: detail, bubbles: true })
);
}
private assignToOwner(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign-to-owner', { detail: detail, bubbles: true })
);
}
private assign(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign', { detail: detail, bubbles: true })
);
}
private addNote(): void {
const textarea = $$('textarea', this) as HTMLInputElement;
const detail = {
key: this.state.key,
text: textarea.value,
};
this.dispatchEvent(
new CustomEvent('add-note', { detail: detail, bubbles: true })
);
textarea.value = '';
}
private _render(): void {
if (!this.state) {
return;
}
render(IncidentSk.template(this), this, { eventContext: this });
}
}
define('incident-sk', IncidentSk);
| incident_silences | identifier_name |
incident-sk.ts | /**
* @module incident-sk
* @description <h2><code>incident-sk</code></h2>
*
* <p>
* Displays a single Incident.
* </p>
*
* @attr minimized {boolean} If not set then the incident is displayed in expanded
* mode, otherwise it is displayed in compact mode.
*
* @attr params {boolean} If set then the incident params are displayed, only
* applicable if minimized is true.
*
* @evt add-note Sent when the user adds a note to an incident.
* The detail includes the text of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* text: "blah blah blah",
* }
* </pre>
*
* @evt del-note Sent when the user deletes a note on an incident.
* The detail includes the index of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
 * @evt assign Sent when the user wants to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set incident_silences(val: Silence[]) {
    this.silences = val;
    this._render();
}
  /** @prop incident_has_recently_expired_silence Whether a matching silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) {
this.recently_expired_silence = val;
this._render();
}
}
  /** @prop incident_flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) |
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
    // Filter out silences whose paramsets do not match, and, when
    // displaySilencesWithComments is true, also those that have no notes.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
.then((json: RecentIncidentsResponse) => {
const incidents = json.incidents || [];
this.incident_flaky = json.flaky;
this.incident_has_recently_expired_silence =
json.recently_expired_silence;
return incidents.map(
(i: Incident) =>
html`<incident-sk .incident_state=${i} minimized></incident-sk>`
);
})
.catch(errorMessage);
}
private toggleSilencesWithComments(e: Event): void {
// This prevents a double event from happening.
e.preventDefault();
this.displaySilencesWithComments = !this.displaySilencesWithComments;
this._render();
}
private displayRecentlyExpired(
recentlyExpiredSilence: boolean
): TemplateResult {
if (recentlyExpiredSilence) {
return html`<alarm-off-icon-sk
title="This alert has a recently expired silence"></alarm-off-icon-sk>`;
}
return html``;
}
private displayFlakiness(flaky: boolean): TemplateResult {
if (flaky) {
return html`<thumbs-up-down-icon-sk
title="This alert is possibly flaky"></thumbs-up-down-icon-sk>`;
}
return html``;
}
private take(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('take', { detail: detail, bubbles: true })
);
}
private assignToOwner(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign-to-owner', { detail: detail, bubbles: true })
);
}
private assign(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign', { detail: detail, bubbles: true })
);
}
private addNote(): void {
const textarea = $$('textarea', this) as HTMLInputElement;
const detail = {
key: this.state.key,
text: textarea.value,
};
this.dispatchEvent(
new CustomEvent('add-note', { detail: detail, bubbles: true })
);
textarea.value = '';
}
private _render(): void {
if (!this.state) {
return;
}
render(IncidentSk.template(this), this, { eventContext: this });
}
}
define('incident-sk', IncidentSk);
| {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
} | identifier_body |
incident-sk.ts | /**
* @module incident-sk
* @description <h2><code>incident-sk</code></h2>
*
* <p>
* Displays a single Incident.
* </p>
*
* @attr minimized {boolean} If not set then the incident is displayed in expanded
* mode, otherwise it is displayed in compact mode.
*
* @attr params {boolean} If set then the incident params are displayed, only
* applicable if minimized is true.
*
* @evt add-note Sent when the user adds a note to an incident.
* The detail includes the text of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* text: "blah blah blah",
* }
* </pre>
*
* @evt del-note Sent when the user deletes a note on an incident.
* The detail includes the index of the note and the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* index: 0,
* }
* </pre>
*
* @evt take Sent when the user wants the incident assigned to themselves.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
* @evt assign Sent when the user want to assign the incident to someone else.
* The detail includes the key of the incident.
*
* <pre>
* detail {
* key: "12312123123",
* }
* </pre>
*
*/
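// Illustrative sketch (not part of the original element): one way a parent
// view could consume the events documented above. The element creation and
// handler body below are assumptions for illustration, not APIs defined here.
//
//   const incident = document.createElement('incident-sk') as IncidentSk;
//   incident.addEventListener('add-note', (e) => {
//     const { key, text } = (e as CustomEvent).detail;
//     // Persist the note for incident `key` here.
//   });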
import { define } from '../../../elements-sk/modules/define';
import '../../../elements-sk/modules/icons/alarm-off-icon-sk';
import '../../../elements-sk/modules/icons/delete-icon-sk';
import '../../../elements-sk/modules/icons/thumbs-up-down-icon-sk';
import '../../../infra-sk/modules/clipboard-sk';
import '../silence-sk';
import { $$ } from '../../../infra-sk/modules/dom';
import { diffDate, strDuration } from '../../../infra-sk/modules/human';
import { errorMessage } from '../../../elements-sk/modules/errorMessage';
import { html, render, TemplateResult } from 'lit-html';
import { until } from 'lit-html/directives/until';
import { jsonOrThrow } from '../../../infra-sk/modules/jsonOrThrow';
import { abbr, linkify, displayNotes } from '../am';
import * as paramset from '../paramset';
import {
Silence,
Incident,
Params,
RecentIncidentsResponse,
Note,
} from '../json';
const MAX_MATCHING_SILENCES_TO_DISPLAY = 50;
const PARAMS_TO_DISPLAY_COPY_ICON = ['abbr', 'alertname', 'app', 'bot'];
class State {
key: string = '';
id: string = '';
params: Params = {};
start: number = 0;
last_seen: number = 0;
active: boolean = false;
notes: Note[] = [];
}
export class IncidentSk extends HTMLElement {
private silences: Silence[] = [];
private displaySilencesWithComments: boolean = false;
private flaky: boolean = false;
private recently_expired_silence: boolean = false;
private state: State = {
key: '',
id: '',
params: {},
start: 0,
last_seen: 0,
active: false,
notes: [],
};
private static template = (ele: IncidentSk) => html`
<h2 class=${ele.classOfH2()}>
${ele.state.params.alertname} ${abbr(ele.state.params.abbr)}
${ele.displayRecentlyExpired(ele.recently_expired_silence)}
${ele.displayFlakiness(ele.flaky)}
</h2>
<section class="detail">
${ele.actionButtons()}
<table class="timing">
<tr>
<th>Started</th>
<td title=${new Date(ele.state.start * 1000).toLocaleString()}>
${diffDate(ele.state.start * 1000)}
</td>
</tr>
${ele.lastSeen()} ${ele.duration()}
</table>
<table class="params">
${ele.table()}
</table>
${displayNotes(ele.state.notes, ele.state.key, 'del-note')}
<section class="addNote">
<textarea rows="2" cols="80"></textarea>
<button @click=${ele.addNote}>Submit</button>
</section>
<section class="matchingSilences">
<span class="matchingSilencesHeaders">
<h3>Matching Silences</h3>
<checkbox-sk
?checked=${ele.displaySilencesWithComments}
@click=${ele.toggleSilencesWithComments}
label="Show only silences with comments">
</checkbox-sk>
</span>
${ele.matchingSilences()}
</section>
<section class="history">
<h3>History</h3>
${until(ele.history(), html`<div class="loading">Loading...</div>`)}
</section>
</section>
`;
/** @prop incident_state An Incident. */
get incident_state(): State {
return this.state;
}
set incident_state(val: State) {
this.state = val;
this._render();
}
/** @prop incident_silences The list of active silences. */
get incident_silences(): Silence[] {
return this.silences;
}
set incident_silences(val: Silence[]) {
    this.silences = val;
    this._render();
}
/** @prop recently_expired_silence Whether silence recently expired. */
get incident_has_recently_expired_silence(): boolean {
return this.recently_expired_silence;
}
set incident_has_recently_expired_silence(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.recently_expired_silence) |
}
/** @prop flaky Whether this incident has been flaky. */
get incident_flaky(): boolean {
return this.flaky;
}
set incident_flaky(val: boolean) {
// No need to render again if value is same as old value.
if (val !== this.flaky) {
this.flaky = val;
this._render();
}
}
private classOfH2(): string {
if (!this.state.active) {
return 'inactive';
}
if (this.state.params.assigned_to) {
return 'assigned';
}
return '';
}
private table(): TemplateResult[] {
const params = this.state.params;
const keys = Object.keys(params);
keys.sort();
return keys
.filter((k) => !k.startsWith('__'))
.map(
(k) => html`
<tr>
<th>${k}</th>
<td>
<span class="respect-newlines">${linkify(params[k])}</span>
${this.maybeDisplayCopyIcon(k)}
</td>
</tr>
`
);
}
private maybeDisplayCopyIcon(k: string): TemplateResult {
if (PARAMS_TO_DISPLAY_COPY_ICON.includes(k)) {
return html`<clipboard-sk value=${this.state.params[k]}></clipboard-sk>`;
}
return html``;
}
private actionButtons(): TemplateResult {
if (this.state.active) {
let assignToOwnerButton = html``;
if (this.state.params.owner) {
assignToOwnerButton = html`<button @click=${this.assignToOwner}>
Assign to Owner
</button>`;
}
return html`<section class="assign">
<button @click=${this.take}>Take</button>
${assignToOwnerButton}
<button @click=${this.assign}>Assign</button>
</section>`;
}
return html``;
}
private matchingSilences(): TemplateResult[] {
if (this.hasAttribute('minimized')) {
return [];
}
    // Filter out silences whose paramsets do not match, and, when
    // displaySilencesWithComments is true, also those that have no notes.
const filteredSilences = this.silences.filter(
(silence: Silence) =>
paramset.match(silence.param_set, this.state.params) &&
!(
this.displaySilencesWithComments &&
this.doesSilenceHaveNoNotes(silence)
)
);
const ret = filteredSilences
.slice(0, MAX_MATCHING_SILENCES_TO_DISPLAY)
.map(
(silence: Silence) =>
html`<silence-sk
.silence_state=${silence}
collapsable
collapsed></silence-sk>`
);
if (!ret.length) {
ret.push(html`<div class="nosilences">None</div>`);
}
return ret;
}
private doesSilenceHaveNoNotes(silence: Silence): boolean {
return (
!silence.notes ||
(silence.notes.length === 1 && silence.notes[0].text === '')
);
}
private lastSeen(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Last Seen</th>
<td title=${new Date(this.state.last_seen * 1000).toLocaleString()}>
${diffDate(this.state.last_seen * 1000)}
</td>
</tr>`;
}
private duration(): TemplateResult {
if (this.state.active) {
return html``;
}
return html`<tr>
<th>Duration</th>
<td>${strDuration(this.state.last_seen - this.state.start)}</td>
</tr>`;
}
private history(): Promise<any> {
if (
this.hasAttribute('minimized') ||
this.state.id === '' ||
this.state.key === ''
) {
return Promise.resolve();
}
return fetch(
`/_/recent_incidents?id=${this.state.id}&key=${this.state.key}`,
{
headers: {
'content-type': 'application/json',
},
credentials: 'include',
method: 'GET',
}
)
.then(jsonOrThrow)
.then((json: RecentIncidentsResponse) => {
const incidents = json.incidents || [];
this.incident_flaky = json.flaky;
this.incident_has_recently_expired_silence =
json.recently_expired_silence;
return incidents.map(
(i: Incident) =>
html`<incident-sk .incident_state=${i} minimized></incident-sk>`
);
})
.catch(errorMessage);
}
private toggleSilencesWithComments(e: Event): void {
// This prevents a double event from happening.
e.preventDefault();
this.displaySilencesWithComments = !this.displaySilencesWithComments;
this._render();
}
private displayRecentlyExpired(
recentlyExpiredSilence: boolean
): TemplateResult {
if (recentlyExpiredSilence) {
return html`<alarm-off-icon-sk
title="This alert has a recently expired silence"></alarm-off-icon-sk>`;
}
return html``;
}
private displayFlakiness(flaky: boolean): TemplateResult {
if (flaky) {
return html`<thumbs-up-down-icon-sk
title="This alert is possibly flaky"></thumbs-up-down-icon-sk>`;
}
return html``;
}
private take(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('take', { detail: detail, bubbles: true })
);
}
private assignToOwner(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign-to-owner', { detail: detail, bubbles: true })
);
}
private assign(): void {
const detail = {
key: this.state.key,
};
this.dispatchEvent(
new CustomEvent('assign', { detail: detail, bubbles: true })
);
}
private addNote(): void {
const textarea = $$('textarea', this) as HTMLInputElement;
const detail = {
key: this.state.key,
text: textarea.value,
};
this.dispatchEvent(
new CustomEvent('add-note', { detail: detail, bubbles: true })
);
textarea.value = '';
}
private _render(): void {
if (!this.state) {
return;
}
render(IncidentSk.template(this), this, { eventContext: this });
}
}
define('incident-sk', IncidentSk);
| {
this.recently_expired_silence = val;
this._render();
} | conditional_block |
parse.go | // Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promquery
import (
"fmt"
"math"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/util/strutil"
)
type parser struct {
lex *lexer
token [3]item
peekCount int
}
// ParseErr wraps a parsing error with line and position context.
// If the parsing input was a single line, line will be 0 and omitted
// from the error string.
type ParseErr struct {
Line, Pos int
Err error
}
func (e *ParseErr) Error() string {
if e.Line == 0 {
return fmt.Sprintf("parse error at char %d: %s", e.Pos, e.Err)
}
return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err)
}
// ParseMetric parses the input into a metric
func ParseMetric(input string) (m labels.Labels, err error) {
p := newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
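// Illustrative series-description inputs (a sketch inferred from the parsing
// logic below; not taken from any external documentation):
//
//	my_metric{a="b"} 1 2 3      // three explicit samples
//	my_metric{a="b"} 1+10x3     // expanding notation: 1 11 21 31
//	my_metric{a="b"} _x2 5      // two omitted samples, then 5
//	my_metric{a="b"} stale      // a staleness-marker sample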
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
}
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
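// Illustrative values accepted by number() (assumed examples):
//
//	p.number("17")   // 17 (integer via ParseInt)
//	p.number("0x10") // 16 (base prefix handled by ParseInt with base 0)
//	p.number("1e3")  // 1000 (falls back to ParseFloat)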
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
}
// labelMatchers parses a set of label matchers.
//
// '{' [ <labelname> <match_op> <match_string>, ... ] '}'
//
// if no 'operators' are given, then any operator type is valid
func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
const ctx = "label matching"
matchers := []*labels.Matcher{}
p.expect(itemLeftBrace, ctx)
// Check if no matchers are provided.
if p.peek().typ == itemRightBrace {
p.next()
return matchers
}
for {
label := p.expect(itemIdentifier, ctx)
op := p.next().typ
if !op.isOperator() { | if op == allowedOp {
validOp = true
}
}
if !validOp && len(operators) > 0 {
p.errorf("operator must be one of %q, is %q", operators, op)
}
val := p.unquoteString(p.expect(itemString, ctx).val)
// Map the item to the respective match type.
var matchType labels.MatchType
switch op {
case itemEQL:
matchType = labels.MatchEqual
case itemNEQ:
matchType = labels.MatchNotEqual
case itemEQLRegex:
matchType = labels.MatchRegexp
case itemNEQRegex:
matchType = labels.MatchNotRegexp
default:
p.errorf("item %q is not a metric match type", op)
}
m, err := labels.NewMatcher(matchType, label.val, val)
if err != nil {
p.error(err)
}
matchers = append(matchers, m)
if p.peek().typ == itemIdentifier {
p.errorf("missing comma before next identifier %q", p.peek().val)
}
// Terminate list if last matcher.
if p.peek().typ != itemComma {
break
}
p.next()
// Allow comma after each item in a multi-line listing.
if p.peek().typ == itemRightBrace {
break
}
}
p.expect(itemRightBrace, ctx)
return matchers
}
// metric parses a metric.
//
// <label_set>
// <metric_identifier> [<label_set>]
//
func (p *parser) metric() labels.Labels {
name := ""
var m labels.Labels
t := p.peek().typ
if t == itemIdentifier || t == itemMetricIdentifier {
name = p.next().val
t = p.peek().typ
}
if t != itemLeftBrace && name == "" {
p.errorf("missing metric name or metric selector")
}
if t == itemLeftBrace {
m = p.labelSet()
}
if name != "" {
m = append(m, labels.Label{Name: labels.MetricName, Value: name})
sort.Sort(m)
}
return m
}
// offset parses an offset modifier.
//
// offset <duration>
//
func (p *parser) offset() time.Duration {
const ctx = "offset"
p.next()
offi := p.expect(itemDuration, ctx)
offset, err := parseDuration(offi.val)
if err != nil {
p.error(err)
}
return offset
}
func (p *parser) unquoteString(s string) string {
unquoted, err := strutil.Unquote(s)
if err != nil {
p.errorf("error unquoting string %q: %s", s, err)
}
return unquoted
}
func parseDuration(ds string) (time.Duration, error) {
dur, err := model.ParseDuration(ds)
if err != nil {
return 0, err
}
if dur == 0 {
return 0, fmt.Errorf("duration must be greater than 0")
}
return time.Duration(dur), nil
} | p.errorf("expected label matching operator but got %s", op)
}
var validOp = false
for _, allowedOp := range operators { | random_line_split |
parse.go | // Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promquery
import (
"fmt"
"math"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/util/strutil"
)
type parser struct {
lex *lexer
token [3]item
peekCount int
}
// ParseErr wraps a parsing error with line and position context.
// If the parsing input was a single line, line will be 0 and omitted
// from the error string.
type ParseErr struct {
Line, Pos int
Err error
}
func (e *ParseErr) Error() string {
if e.Line == 0 {
return fmt.Sprintf("parse error at char %d: %s", e.Pos, e.Err)
}
return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err)
}
// ParseMetric parses the input into a metric
func ParseMetric(input string) (m labels.Labels, err error) {
p := newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank |
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
}
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
}
// labelMatchers parses a set of label matchers.
//
// '{' [ <labelname> <match_op> <match_string>, ... ] '}'
//
// if no 'operators' are given, then any operator type is valid
func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
const ctx = "label matching"
matchers := []*labels.Matcher{}
p.expect(itemLeftBrace, ctx)
// Check if no matchers are provided.
if p.peek().typ == itemRightBrace {
p.next()
return matchers
}
for {
label := p.expect(itemIdentifier, ctx)
op := p.next().typ
if !op.isOperator() {
p.errorf("expected label matching operator but got %s", op)
}
var validOp = false
for _, allowedOp := range operators {
if op == allowedOp {
validOp = true
}
}
if !validOp && len(operators) > 0 {
p.errorf("operator must be one of %q, is %q", operators, op)
}
val := p.unquoteString(p.expect(itemString, ctx).val)
// Map the item to the respective match type.
var matchType labels.MatchType
switch op {
case itemEQL:
matchType = labels.MatchEqual
case itemNEQ:
matchType = labels.MatchNotEqual
case itemEQLRegex:
matchType = labels.MatchRegexp
case itemNEQRegex:
matchType = labels.MatchNotRegexp
default:
p.errorf("item %q is not a metric match type", op)
}
m, err := labels.NewMatcher(matchType, label.val, val)
if err != nil {
p.error(err)
}
matchers = append(matchers, m)
if p.peek().typ == itemIdentifier {
p.errorf("missing comma before next identifier %q", p.peek().val)
}
// Terminate list if last matcher.
if p.peek().typ != itemComma {
break
}
p.next()
// Allow comma after each item in a multi-line listing.
if p.peek().typ == itemRightBrace {
break
}
}
p.expect(itemRightBrace, ctx)
return matchers
}
// metric parses a metric.
//
// <label_set>
// <metric_identifier> [<label_set>]
//
func (p *parser) metric() labels.Labels {
name := ""
var m labels.Labels
t := p.peek().typ
if t == itemIdentifier || t == itemMetricIdentifier {
name = p.next().val
t = p.peek().typ
}
if t != itemLeftBrace && name == "" {
p.errorf("missing metric name or metric selector")
}
if t == itemLeftBrace {
m = p.labelSet()
}
if name != "" {
m = append(m, labels.Label{Name: labels.MetricName, Value: name})
sort.Sort(m)
}
return m
}
// offset parses an offset modifier.
//
// offset <duration>
//
func (p *parser) offset() time.Duration {
const ctx = "offset"
p.next()
offi := p.expect(itemDuration, ctx)
offset, err := parseDuration(offi.val)
if err != nil {
p.error(err)
}
return offset
}
func (p *parser) unquoteString(s string) string {
unquoted, err := strutil.Unquote(s)
if err != nil {
p.errorf("error unquoting string %q: %s", s, err)
}
return unquoted
}
func parseDuration(ds string) (time.Duration, error) {
dur, err := model.ParseDuration(ds)
if err != nil {
return 0, err
}
if dur == 0 {
return 0, fmt.Errorf("duration must be greater than 0")
}
return time.Duration(dur), nil
}
| {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
} | conditional_block |
parse.go | // Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promquery
import (
"fmt"
"math"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/util/strutil"
)
type parser struct {
lex *lexer
token [3]item
peekCount int
}
// ParseErr wraps a parsing error with line and position context.
// If the parsing input was a single line, line will be 0 and omitted
// from the error string.
type ParseErr struct {
Line, Pos int
Err error
}
func (e *ParseErr) Error() string {
if e.Line == 0 {
return fmt.Sprintf("parse error at char %d: %s", e.Pos, e.Err)
}
return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err)
}
// ParseMetric parses the input into a metric
func ParseMetric(input string) (m labels.Labels, err error) {
p := newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
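// Illustrative call site (hypothetical; not part of this file). ParseMetric
// only accepts '=' matchers, since labelSet restricts the operator set:
//
//	m, err := promquery.ParseMetric(`http_requests_total{job="api",instance="node1"}`)
//	// on success, m contains the __name__, instance and job labels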
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item |
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
}
// labelMatchers parses a set of label matchers.
//
// '{' [ <labelname> <match_op> <match_string>, ... ] '}'
//
// if no 'operators' are given, then any operator type is valid
func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
const ctx = "label matching"
matchers := []*labels.Matcher{}
p.expect(itemLeftBrace, ctx)
// Check if no matchers are provided.
if p.peek().typ == itemRightBrace {
p.next()
return matchers
}
for {
label := p.expect(itemIdentifier, ctx)
op := p.next().typ
if !op.isOperator() {
p.errorf("expected label matching operator but got %s", op)
}
var validOp = false
for _, allowedOp := range operators {
if op == allowedOp {
validOp = true
}
}
if !validOp && len(operators) > 0 {
p.errorf("operator must be one of %q, is %q", operators, op)
}
val := p.unquoteString(p.expect(itemString, ctx).val)
// Map the item to the respective match type.
var matchType labels.MatchType
switch op {
case itemEQL:
matchType = labels.MatchEqual
case itemNEQ:
matchType = labels.MatchNotEqual
case itemEQLRegex:
matchType = labels.MatchRegexp
case itemNEQRegex:
matchType = labels.MatchNotRegexp
default:
p.errorf("item %q is not a metric match type", op)
}
m, err := labels.NewMatcher(matchType, label.val, val)
if err != nil {
p.error(err)
}
matchers = append(matchers, m)
if p.peek().typ == itemIdentifier {
p.errorf("missing comma before next identifier %q", p.peek().val)
}
// Terminate list if last matcher.
if p.peek().typ != itemComma {
break
}
p.next()
// Allow comma after each item in a multi-line listing.
if p.peek().typ == itemRightBrace {
break
}
}
p.expect(itemRightBrace, ctx)
return matchers
}
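// Illustrative matcher forms accepted above when all four operators are
// allowed (sketch only; the selector parsing that reaches this point is
// outside this excerpt):
//
//	{job="api"}            -> labels.MatchEqual
//	{job!="api"}           -> labels.MatchNotEqual
//	{path=~"/v1/.*"}       -> labels.MatchRegexp
//	{path!~"/internal/.*"} -> labels.MatchNotRegexp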
// metric parses a metric.
//
// <label_set>
// <metric_identifier> [<label_set>]
//
func (p *parser) metric() labels.Labels {
name := ""
var m labels.Labels
t := p.peek().typ
if t == itemIdentifier || t == itemMetricIdentifier {
name = p.next().val
t = p.peek().typ
}
if t != itemLeftBrace && name == "" {
p.errorf("missing metric name or metric selector")
}
if t == itemLeftBrace {
m = p.labelSet()
}
if name != "" {
m = append(m, labels.Label{Name: labels.MetricName, Value: name})
sort.Sort(m)
}
return m
}
// offset parses an offset modifier.
//
// offset <duration>
//
func (p *parser) offset() time.Duration {
const ctx = "offset"
p.next()
offi := p.expect(itemDuration, ctx)
offset, err := parseDuration(offi.val)
if err != nil {
p.error(err)
}
return offset
}
func (p *parser) unquoteString(s string) string {
unquoted, err := strutil.Unquote(s)
if err != nil {
p.errorf("error unquoting string %q: %s", s, err)
}
return unquoted
}
func parseDuration(ds string) (time.Duration, error) {
dur, err := model.ParseDuration(ds)
if err != nil {
return 0, err
}
if dur == 0 {
return 0, fmt.Errorf("duration must be greater than 0")
}
return time.Duration(dur), nil
}
| {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
} | identifier_body |
parse.go | // Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promquery
import (
"fmt"
"math"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/value"
"github.com/prometheus/prometheus/util/strutil"
)
type parser struct {
lex *lexer
token [3]item
peekCount int
}
// ParseErr wraps a parsing error with line and position context.
// If the parsing input was a single line, line will be 0 and omitted
// from the error string.
type ParseErr struct {
Line, Pos int
Err error
}
func (e *ParseErr) Error() string {
if e.Line == 0 {
return fmt.Sprintf("parse error at char %d: %s", e.Pos, e.Err)
}
return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err)
}
// ParseMetric parses the input into a metric
func ParseMetric(input string) (m labels.Labels, err error) {
p := newParser(input)
defer p.recover(&err)
m = p.metric()
if p.peek().typ != itemEOF {
p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:])
}
return m, nil
}
// newParser returns a new parser.
func newParser(input string) *parser {
p := &parser{
lex: lex(input),
}
return p
}
// sequenceValue is an omittable value in a sequence of time series values.
type sequenceValue struct {
value float64
omitted bool
}
func (v sequenceValue) String() string {
if v.omitted {
return "_"
}
return fmt.Sprintf("%f", v.value)
}
// parseSeriesDesc parses the description of a time series.
func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) {
p := newParser(input)
p.lex.seriesDesc = true
return p.parseSeriesDesc()
}
// parseSeriesDesc parses a description of a time series into its metric and value sequence.
func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) {
defer p.recover(&err)
m = p.metric()
const ctx = "series values"
for {
for p.peek().typ == itemSpace {
p.next()
}
if p.peek().typ == itemEOF {
break
}
// Extract blanks.
if p.peek().typ == itemBlank {
p.next()
times := uint64(1)
if p.peek().typ == itemTimes {
p.next()
times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
}
for i := uint64(0); i < times; i++ {
vals = append(vals, sequenceValue{omitted: true})
}
// This is to ensure that there is a space between this and the next number.
// This is especially required if the next number is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
continue
}
// Extract values.
sign := 1.0
if t := p.peek().typ; t == itemSUB || t == itemADD {
if p.next().typ == itemSUB {
sign = -1
}
}
var k float64
if t := p.peek().typ; t == itemNumber {
k = sign * p.number(p.expect(itemNumber, ctx).val)
} else if t == itemIdentifier && p.peek().val == "stale" {
p.next()
k = math.Float64frombits(value.StaleNaN)
} else {
p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
vals = append(vals, sequenceValue{
value: k,
})
// If there are no offset repetitions specified, proceed with the next value.
if t := p.peek(); t.typ == itemSpace {
// This ensures there is a space between every value.
continue
} else if t.typ == itemEOF {
break
} else if t.typ != itemADD && t.typ != itemSUB {
p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek())
}
// Expand the repeated offsets into values.
sign = 1.0
if p.next().typ == itemSUB {
sign = -1.0
}
offset := sign * p.number(p.expect(itemNumber, ctx).val)
p.expect(itemTimes, ctx)
times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64)
if err != nil {
p.errorf("invalid repetition in %s: %s", ctx, err)
}
for i := uint64(0); i < times; i++ {
k += offset
vals = append(vals, sequenceValue{
value: k,
})
}
// This is to ensure that there is a space between this expanding notation
// and the next number. This is especially required if the next number
// is negative.
if t := p.expectOneOf(itemSpace, itemEOF, ctx).typ; t == itemEOF {
break
}
}
return m, vals, nil
}
// next returns the next token.
func (p *parser) next() item {
if p.peekCount > 0 {
p.peekCount--
} else {
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
}
if p.token[p.peekCount].typ == itemError {
p.errorf("%s", p.token[p.peekCount].val)
}
return p.token[p.peekCount]
}
// peek returns but does not consume the next token.
func (p *parser) peek() item {
if p.peekCount > 0 {
return p.token[p.peekCount-1]
}
p.peekCount = 1
t := p.lex.nextItem()
// Skip comments.
for t.typ == itemComment {
t = p.lex.nextItem()
}
p.token[0] = t
return p.token[0]
}
// backup backs the input stream up one token.
func (p *parser) backup() {
p.peekCount++
}
// errorf formats the error and terminates processing.
func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Errorf(format, args...))
}
// error terminates processing.
func (p *parser) error(err error) {
perr := &ParseErr{
Line: p.lex.lineNumber(),
Pos: p.lex.linePosition(),
Err: err,
}
if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 {
perr.Line = 0
}
panic(perr)
}
// expect consumes the next token and guarantees it has the required type.
func (p *parser) expect(exp ItemType, context string) item {
token := p.next()
if token.typ != exp {
p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc())
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
token := p.next()
if token.typ != exp1 && token.typ != exp2 {
p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
}
return token
}
var errUnexpected = fmt.Errorf("unexpected error")
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) | (errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
// Print the stack trace but do not inhibit the running application.
buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)]
fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
*errp = errUnexpected
} else {
*errp = e.(error)
}
}
p.lex.close()
}
// number parses a number.
func (p *parser) number(val string) float64 {
n, err := strconv.ParseInt(val, 0, 64)
f := float64(n)
if err != nil {
f, err = strconv.ParseFloat(val, 64)
}
if err != nil {
p.errorf("error parsing number: %s", err)
}
return f
}
// labels parses a list of labelnames.
//
// '(' <label_name>, ... ')'
//
func (p *parser) labels() []string {
const ctx = "grouping opts"
p.expect(itemLeftParen, ctx)
labels := []string{}
if p.peek().typ != itemRightParen {
for {
id := p.next()
if !isLabel(id.val) {
p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
}
labels = append(labels, id.val)
if p.peek().typ != itemComma {
break
}
p.next()
}
}
p.expect(itemRightParen, ctx)
return labels
}
// labelSet parses a set of label matchers
//
// '{' [ <labelname> '=' <match_string>, ... ] '}'
//
func (p *parser) labelSet() labels.Labels {
set := []labels.Label{}
for _, lm := range p.labelMatchers(itemEQL) {
set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
}
return labels.New(set...)
}
// labelMatchers parses a set of label matchers.
//
// '{' [ <labelname> <match_op> <match_string>, ... ] '}'
//
// if no 'operators' are given, then any operator type is valid
func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
const ctx = "label matching"
matchers := []*labels.Matcher{}
p.expect(itemLeftBrace, ctx)
// Check if no matchers are provided.
if p.peek().typ == itemRightBrace {
p.next()
return matchers
}
for {
label := p.expect(itemIdentifier, ctx)
op := p.next().typ
if !op.isOperator() {
p.errorf("expected label matching operator but got %s", op)
}
var validOp = false
for _, allowedOp := range operators {
if op == allowedOp {
validOp = true
}
}
if !validOp && len(operators) > 0 {
p.errorf("operator must be one of %q, is %q", operators, op)
}
val := p.unquoteString(p.expect(itemString, ctx).val)
// Map the item to the respective match type.
var matchType labels.MatchType
switch op {
case itemEQL:
matchType = labels.MatchEqual
case itemNEQ:
matchType = labels.MatchNotEqual
case itemEQLRegex:
matchType = labels.MatchRegexp
case itemNEQRegex:
matchType = labels.MatchNotRegexp
default:
p.errorf("item %q is not a metric match type", op)
}
m, err := labels.NewMatcher(matchType, label.val, val)
if err != nil {
p.error(err)
}
matchers = append(matchers, m)
if p.peek().typ == itemIdentifier {
p.errorf("missing comma before next identifier %q", p.peek().val)
}
// Terminate list if last matcher.
if p.peek().typ != itemComma {
break
}
p.next()
// Allow comma after each item in a multi-line listing.
if p.peek().typ == itemRightBrace {
break
}
}
p.expect(itemRightBrace, ctx)
return matchers
}
// metric parses a metric.
//
// <label_set>
// <metric_identifier> [<label_set>]
//
func (p *parser) metric() labels.Labels {
name := ""
var m labels.Labels
t := p.peek().typ
if t == itemIdentifier || t == itemMetricIdentifier {
name = p.next().val
t = p.peek().typ
}
if t != itemLeftBrace && name == "" {
p.errorf("missing metric name or metric selector")
}
if t == itemLeftBrace {
m = p.labelSet()
}
if name != "" {
m = append(m, labels.Label{Name: labels.MetricName, Value: name})
sort.Sort(m)
}
return m
}
// offset parses an offset modifier.
//
// offset <duration>
//
func (p *parser) offset() time.Duration {
const ctx = "offset"
p.next()
offi := p.expect(itemDuration, ctx)
offset, err := parseDuration(offi.val)
if err != nil {
p.error(err)
}
return offset
}
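// Illustrative offset-modifier inputs handled by offset() and parseDuration()
// (assumed examples; the surrounding expression parser is outside this excerpt):
//
//	offset 5m   -> 5 * time.Minute
//	offset 1h   -> time.Hour
//	offset 0s   -> error: duration must be greater than 0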
func (p *parser) unquoteString(s string) string {
unquoted, err := strutil.Unquote(s)
if err != nil {
p.errorf("error unquoting string %q: %s", s, err)
}
return unquoted
}
func parseDuration(ds string) (time.Duration, error) {
dur, err := model.ParseDuration(ds)
if err != nil {
return 0, err
}
if dur == 0 {
return 0, fmt.Errorf("duration must be greater than 0")
}
return time.Duration(dur), nil
}
| recover | identifier_name |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self |
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
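// Illustrative sketch (added, not part of the original file): a test might
// assemble the FIFO response queue as below before handing it to mock().
// The hash values and credential metadata bytes are arbitrary placeholders,
// not values a real CR50 agent would produce.
#[allow(dead_code)]
fn example_mock_responses() -> VecDeque<MockResponse> {
    MockCr50AgentBuilder::new()
        .add_get_version_response(1)
        .add_reset_tree_response([0u8; 32])
        .add_insert_leaf_response([1u8; 32], [2u8; 32], vec![0u8; 8])
        .add_try_auth_success_response([1u8; 32], vec![0u8; 8], [2u8; 32])
        .build()
}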
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the echo protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
}
| {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
} | identifier_body |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else |
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the echo protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
}
| {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
} | conditional_block |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
async fn | (
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response
),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the echo protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
}
| handle_request | identifier_name |
mock_cr50_agent.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Error,
fidl_fuchsia_tpm_cr50::{
InsertLeafResponse, PinWeaverRequest, PinWeaverRequestStream, TryAuthFailed,
TryAuthRateLimited, TryAuthResponse, TryAuthSuccess,
},
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_component_test::LocalComponentHandles,
futures::stream::{StreamExt, TryStreamExt},
parking_lot::Mutex,
std::collections::VecDeque,
std::sync::Arc,
};
/// Struct that builds a set of mock responses for CR50 agent requests.
/// Mock responses should be created (via add_* functions) in FIFO order and
/// will panic at test time if the order of requests does not match the order
/// of responses added to the builder.
/// Successful TryAuth responses take neither a `he_secret` nor a
/// `reset_secret` as those are generated via cprng by password_authenticator.
/// Instead, a successful TryAuth response will always return the `he_secret`
/// provided to the most recent InsertLeaf call, and will always return an
/// empty `reset_secret`.
/// TODO(fxb/89060, arkay): This logic could be improved upon to match the
/// `he_secret` to the credential label if necessary.
pub(crate) struct MockCr50AgentBuilder {
responses: VecDeque<MockResponse>,
}
/// Defines the type of a Hash as CR50 expects it.
pub(crate) type Hash = [u8; 32];
/// Defines an enum of known MockResponse types.
#[derive(Clone, Debug)]
pub(crate) enum MockResponse {
GetVersion { version: u8 },
ResetTree { root_hash: Hash },
InsertLeaf { response: InsertLeafResponse },
RemoveLeaf { root_hash: Hash },
TryAuth { response: TryAuthResponse },
}
#[allow(dead_code)]
impl MockCr50AgentBuilder {
/// Initializes a new MockCr50AgentBuilder.
pub(crate) fn new() -> Self {
MockCr50AgentBuilder { responses: VecDeque::new() }
}
/// Adds a GetVersion response.
pub(crate) fn add_get_version_response(mut self, version: u8) -> Self {
self.responses.push_back(MockResponse::GetVersion { version });
self
}
/// Adds a ResetTree response.
pub(crate) fn add_reset_tree_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::ResetTree { root_hash });
self
}
/// Adds an InsertLeaf response.
/// This function does not take an he_secret or reset_secret, see
/// [`MockCr50AgentBuilder`] for more information.
pub(crate) fn add_insert_leaf_response(
mut self,
root_hash: Hash,
mac: Hash,
cred_metadata: Vec<u8>,
) -> Self {
let response = InsertLeafResponse {
root_hash: Some(root_hash),
mac: Some(mac),
cred_metadata: Some(cred_metadata),
..InsertLeafResponse::EMPTY
};
self.responses.push_back(MockResponse::InsertLeaf { response });
self
}
/// Adds a RemoveLeaf response.
pub(crate) fn add_remove_leaf_response(mut self, root_hash: Hash) -> Self {
self.responses.push_back(MockResponse::RemoveLeaf { root_hash });
self
}
/// Adds a successful TryAuth response.
pub(crate) fn add_try_auth_success_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let success = TryAuthSuccess {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthSuccess::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Success(success) });
self
}
/// Adds a failed TryAuth response.
pub(crate) fn add_try_auth_failed_response(
mut self,
root_hash: Hash,
cred_metadata: Vec<u8>,
mac: Hash,
) -> Self {
let failed = TryAuthFailed {
root_hash: Some(root_hash),
cred_metadata: Some(cred_metadata),
mac: Some(mac),
..TryAuthFailed::EMPTY
};
self.responses
.push_back(MockResponse::TryAuth { response: TryAuthResponse::Failed(failed) });
self
}
/// Adds a rate limited TryAuth response.
pub(crate) fn add_try_auth_rate_limited_response(mut self, time_to_wait: i64) -> Self {
let ratelimited =
TryAuthRateLimited { time_to_wait: Some(time_to_wait), ..TryAuthRateLimited::EMPTY };
self.responses.push_back(MockResponse::TryAuth {
response: TryAuthResponse::RateLimited(ratelimited),
});
self
}
/// Consumes the builder and returns the VecDeque of responses for use with `mock()`.
pub(crate) fn build(self) -> VecDeque<MockResponse> {
self.responses
}
}
async fn handle_request(
request: PinWeaverRequest,
next_response: MockResponse,
he_secret: &Arc<Mutex<Vec<u8>>>,
) {
// Match the next response with the request, panicking if requests are out
// of the expected order.
match request {
PinWeaverRequest::GetVersion { responder: resp } => {
match next_response {
MockResponse::GetVersion { version } => {
resp.send(version).expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected GetVersion.",
next_response
),
};
}
PinWeaverRequest::ResetTree { bits_per_level: _, height: _, responder: resp } => {
match next_response {
MockResponse::ResetTree { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected ResetTree.",
next_response
),
};
}
PinWeaverRequest::InsertLeaf { params, responder: resp } => {
match next_response {
MockResponse::InsertLeaf { response } => {
// Store the he_secret received in the most recent
// InsertLeaf response to return in subsequent successful
// TryAuth responses.
let mut secret = he_secret.lock();
*secret = params.he_secret.expect("expected he_secret provided in params");
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response"); | ),
};
}
PinWeaverRequest::RemoveLeaf { params: _, responder: resp } => {
match next_response {
MockResponse::RemoveLeaf { root_hash } => {
resp.send(&mut std::result::Result::Ok(root_hash))
.expect("failed to send response");
}
_ => panic!(
"Next mock response type was {:?} but expected RemoveLeaf.",
next_response
),
};
}
PinWeaverRequest::TryAuth { params: _, responder: resp } => {
match next_response {
MockResponse::TryAuth { response } => {
if let TryAuthResponse::Success(success) = response {
// If it's a success, grab the last he_secret provided via InsertLeaf.
let secret = he_secret.lock();
resp.send(&mut std::result::Result::Ok(TryAuthResponse::Success(
TryAuthSuccess { he_secret: Some((*secret).clone()), ..success },
)))
.expect("failed to send response");
} else {
resp.send(&mut std::result::Result::Ok(response))
.expect("failed to send response");
}
}
_ => {
panic!("Next mock response type was {:?} but expected TryAuth.", next_response)
}
};
}
// GetLog and LogReplay are unimplemented as testing log replay is out
// of scope for pwauth-credmgr integration tests.
PinWeaverRequest::GetLog { root_hash: _, responder: _ } => {
unimplemented!();
}
PinWeaverRequest::LogReplay { params: _, responder: _ } => {
unimplemented!();
}
}
}
pub(crate) async fn mock(
mock_responses: VecDeque<MockResponse>,
handles: LocalComponentHandles,
) -> Result<(), Error> {
// Create a new ServiceFs to host FIDL protocols from
let mut fs = fserver::ServiceFs::new();
let mut tasks = vec![];
let last_he_secret: Arc<Mutex<Vec<u8>>> = Arc::new(Mutex::new(vec![0; 32]));
// Add the echo protocol to the ServiceFs
fs.dir("svc").add_fidl_service(move |mut stream: PinWeaverRequestStream| {
// Need to clone the mock responses again because this is a FnMut not a FnOnce
let mut task_mock_responses = mock_responses.clone();
let he_secret = Arc::clone(&last_he_secret);
tasks.push(fasync::Task::local(async move {
while let Some(request) =
stream.try_next().await.expect("failed to serve pinweaver service")
{
// Look at the next (FIFO) response.
let next_response = task_mock_responses.pop_front().expect(&format!(
"Ran out of mock Pinweaver responses. Next request received is: {:?}",
request
));
handle_request(request, next_response, &he_secret).await;
}
}));
});
// Run the ServiceFs on the outgoing directory handle from the mock handles
fs.serve_connection(handles.outgoing_dir)?;
fs.collect::<()>().await;
Ok(())
} | }
_ => panic!(
"Next mock response type was {:?} but expected InsertLeaf.",
next_response | random_line_split |
hash_aggregator.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"math"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/colconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// hashAggregatorState represents the state of the hash aggregator operator.
type hashAggregatorState int
const (
// hashAggregatorBuffering is the state in which the hashAggregator reads
// the batches from the input and buffers them up. Once the number of
// buffered tuples reaches maxBuffered or the input has been fully exhausted,
// the hashAggregator transitions to hashAggregatorAggregating state.
hashAggregatorBuffering hashAggregatorState = iota
// hashAggregatorAggregating is the state in which the hashAggregator is
// performing the aggregation on the buffered tuples. If the input has been
// fully exhausted and the buffer is empty, the hashAggregator transitions
// to hashAggregatorOutputting state.
hashAggregatorAggregating
// hashAggregatorOutputting is the state in which the hashAggregator is
// writing its aggregation results to the output buffer.
hashAggregatorOutputting
// hashAggregatorDone is the state in which the hashAggregator has finished
// writing to the output buffer.
hashAggregatorDone
)
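// Added for clarity (not part of the original source). In summary, Next below
// performs the following transitions:
//   hashAggregatorBuffering   -> hashAggregatorAggregating once maxBuffered
//                                tuples are buffered, or once the input is
//                                exhausted with tuples still buffered
//   hashAggregatorBuffering   -> hashAggregatorOutputting (or directly to
//                                hashAggregatorDone when no groups exist) once
//                                the input is exhausted with nothing buffered
//   hashAggregatorAggregating -> hashAggregatorBuffering while input remains
//   hashAggregatorAggregating -> hashAggregatorOutputting (or Done when no
//                                groups exist) once the input is exhausted
//   hashAggregatorOutputting  -> hashAggregatorDone after every bucket has
//                                been flushed to the output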
// hashAggregator is an operator that performs aggregation based on the
// specified grouping columns. This operator performs aggregation in online
// fashion. It reads from the input one batch at a time, groups all tuples into
// the equality chains, probes heads of those chains against already existing
// buckets and creates new buckets for new groups. After the input is
// exhausted, the operator begins to write the result into an output buffer.
// The output row ordering of this operator is arbitrary.
// Note that throughout this file "buckets" and "groups" mean the same thing
// and are used interchangeably.
type hashAggregator struct {
colexecop.OneInputNode
allocator *colmem.Allocator
spec *execinfrapb.AggregatorSpec
aggHelper aggregatorHelper
inputTypes []*types.T
outputTypes []*types.T
inputArgsConverter *colconv.VecToDatumConverter
// maxBuffered determines the maximum number of tuples that are buffered up
// for aggregation at once.
maxBuffered int
bufferingState struct {
// tuples contains the tuples that we have buffered up for aggregation.
// Its length will not exceed maxBuffered.
tuples *colexecutils.AppendOnlyBufferedBatch
// pendingBatch stores the last read batch from the input that hasn't
// been fully processed yet.
pendingBatch coldata.Batch
// unprocessedIdx is the index of the first tuple in pendingBatch that
// hasn't been processed yet.
unprocessedIdx int
}
// numPreviouslyCreatedBuckets tracks the maximum number of buckets that
// have been created throughout the lifetime of this hashAggregator. This
// matters if the hashAggregator is reset - we reuse the same buckets on the
// next run.
// If non-zero, all buckets available to use are in
// buckets[len(buckets):numPreviouslyCreatedBuckets] range. Note that
// cap(buckets) might be higher than this number, but all buckets past
// numPreviouslyCreatedBuckets haven't been instantiated properly, so
// cap(buckets) should be ignored.
numPreviouslyCreatedBuckets int
// buckets contains all aggregation groups that we have so far. There is
// 1-to-1 mapping between buckets[i] and ht.Vals[i].
buckets []*aggBucket
// ht stores tuples that are "heads" of the corresponding aggregation
// groups ("head" here means the tuple that was first seen from the group).
ht *colexechash.HashTable
// state stores the current state of hashAggregator.
state hashAggregatorState
scratch struct {
// eqChains stores the chains of tuples from the current batch that are
// equal on the grouping columns (meaning that all tuples from the
// batch will be included into one of these chains). These chains must
// be set to zero length once the batch has been processed so that the
// memory could be reused.
eqChains [][]int
// intSlice and anotherIntSlice are simply scratch int slices that are
// reused for several purposes by the hashAggregator.
intSlice []int
anotherIntSlice []int
}
	// inputTrackingState tracks all the input tuples, which is needed in order
	// to fall back to the external hash aggregator.
inputTrackingState struct {
tuples *colexecutils.SpillingQueue
zeroBatchEnqueued bool
}
// curOutputBucketIdx tracks the index in buckets to be flushed next when
// populating the output.
curOutputBucketIdx int
output coldata.Batch
aggFnsAlloc *colexecagg.AggregateFuncsAlloc
hashAlloc aggBucketAlloc
datumAlloc rowenc.DatumAlloc
toClose colexecop.Closers
}
var _ colexecop.ResettableOperator = &hashAggregator{}
var _ colexecop.BufferingInMemoryOperator = &hashAggregator{}
var _ colexecop.ClosableOperator = &hashAggregator{}
// hashAggregatorAllocSize determines the allocation size used by the hash
// aggregator's allocators. This number was chosen after running benchmarks of
// 'sum' aggregation on ints and decimals with varying group sizes (powers of 2
// from 1 to 4096).
const hashAggregatorAllocSize = 128
// NewHashAggregator creates a hash aggregator on the given grouping columns.
// The input specifications to this function are the same as those of the
// NewOrderedAggregator function.
// newSpillingQueueArgs - when non-nil - specifies the arguments with which to
// instantiate a SpillingQueue that will be used to keep all of the input
// tuples in case the in-memory hash aggregator needs to fall back to the
// disk-backed operator. Pass in nil in order to not track the input tuples.
func NewHashAggregator(
args *colexecagg.NewAggregatorArgs, newSpillingQueueArgs *colexecutils.NewSpillingQueueArgs,
) (colexecop.ResettableOperator, error) {
aggFnsAlloc, inputArgsConverter, toClose, err := colexecagg.NewAggregateFuncsAlloc(
args, hashAggregatorAllocSize, true, /* isHashAgg */
)
// We want this number to be coldata.MaxBatchSize, but then we would lose
// some test coverage due to disabling of the randomization of the batch
// size, so we, instead, use 4 x coldata.BatchSize() (which ends up being
// coldata.MaxBatchSize in non-test environment).
maxBuffered := 4 * coldata.BatchSize()
if maxBuffered > coldata.MaxBatchSize {
// When randomizing coldata.BatchSize() in tests we might exceed
// coldata.MaxBatchSize, so we need to shrink it.
maxBuffered = coldata.MaxBatchSize
}
hashAgg := &hashAggregator{
OneInputNode: colexecop.NewOneInputNode(args.Input),
allocator: args.Allocator,
spec: args.Spec,
state: hashAggregatorBuffering,
inputTypes: args.InputTypes,
outputTypes: args.OutputTypes,
inputArgsConverter: inputArgsConverter,
maxBuffered: maxBuffered,
toClose: toClose,
aggFnsAlloc: aggFnsAlloc,
hashAlloc: aggBucketAlloc{allocator: args.Allocator},
}
hashAgg.bufferingState.tuples = colexecutils.NewAppendOnlyBufferedBatch(args.Allocator, args.InputTypes, nil /* colsToStore */)
hashAgg.datumAlloc.AllocSize = hashAggregatorAllocSize
hashAgg.aggHelper = newAggregatorHelper(args, &hashAgg.datumAlloc, true /* isHashAgg */, hashAgg.maxBuffered)
if newSpillingQueueArgs != nil {
hashAgg.inputTrackingState.tuples = colexecutils.NewSpillingQueue(newSpillingQueueArgs)
}
return hashAgg, err
}
func (op *hashAggregator) Init() {
op.Input.Init()
// These numbers were chosen after running the micro-benchmarks and relevant
// TPCH queries using tpchvec/bench.
const hashTableLoadFactor = 0.1
const hashTableNumBuckets = 256
op.ht = colexechash.NewHashTable(
op.allocator,
hashTableLoadFactor,
hashTableNumBuckets,
op.inputTypes,
op.spec.GroupCols,
true, /* allowNullEquality */
colexechash.HashTableDistinctBuildMode,
colexechash.HashTableDefaultProbeMode,
)
}
func (op *hashAggregator) Next(ctx context.Context) coldata.Batch {
for {
switch op.state {
case hashAggregatorBuffering:
if op.bufferingState.pendingBatch != nil && op.bufferingState.unprocessedIdx < op.bufferingState.pendingBatch.Length() {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx, op.bufferingState.pendingBatch.Length(),
)
})
}
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx = op.Input.Next(ctx), 0
n := op.bufferingState.pendingBatch.Length()
if op.inputTrackingState.tuples != nil {
op.inputTrackingState.tuples.Enqueue(ctx, op.bufferingState.pendingBatch)
op.inputTrackingState.zeroBatchEnqueued = n == 0
}
if n == 0 {
// This is the last input batch.
if op.bufferingState.tuples.Length() == 0 {
// There are currently no buffered tuples to perform the
// aggregation on.
if len(op.buckets) == 0 {
// We don't have any buckets which means that there were
// no input tuples whatsoever, so we can transition to
// finished state right away.
op.state = hashAggregatorDone
} else {
// There are some buckets, so we proceed to the
// outputting state.
op.state = hashAggregatorOutputting
}
} else {
// There are some buffered tuples on which we need to run
// the aggregation.
op.state = hashAggregatorAggregating
}
continue
}
toBuffer := n
if op.bufferingState.tuples.Length()+toBuffer > op.maxBuffered {
toBuffer = op.maxBuffered - op.bufferingState.tuples.Length()
}
if toBuffer > 0 {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(op.bufferingState.pendingBatch, 0 /* startIdx */, toBuffer)
})
op.bufferingState.unprocessedIdx = toBuffer
}
if op.bufferingState.tuples.Length() == op.maxBuffered {
op.state = hashAggregatorAggregating
continue
}
case hashAggregatorAggregating:
op.inputArgsConverter.ConvertBatch(op.bufferingState.tuples)
op.onlineAgg(ctx, op.bufferingState.tuples)
if op.bufferingState.pendingBatch.Length() == 0 {
if len(op.buckets) == 0 {
op.state = hashAggregatorDone
} else {
op.state = hashAggregatorOutputting
}
continue
}
op.bufferingState.tuples.ResetInternalBatch()
op.state = hashAggregatorBuffering
case hashAggregatorOutputting:
// Note that ResetMaybeReallocate truncates the requested capacity
// at coldata.BatchSize(), so we can just try asking for
// len(op.buckets) capacity. Note that in hashAggregatorOutputting
// state we always have at least 1 bucket.
//
// For now, we don't enforce any footprint-based memory limit.
// TODO(yuzefovich): refactor this.
const maxBatchMemSize = math.MaxInt64
op.output, _ = op.allocator.ResetMaybeReallocate(
op.outputTypes, op.output, len(op.buckets), maxBatchMemSize,
)
curOutputIdx := 0
op.allocator.PerformOperation(op.output.ColVecs(), func() {
for curOutputIdx < op.output.Capacity() && op.curOutputBucketIdx < len(op.buckets) {
bucket := op.buckets[op.curOutputBucketIdx]
for fnIdx, fn := range bucket.fns {
fn.SetOutput(op.output.ColVec(fnIdx))
fn.Flush(curOutputIdx)
}
curOutputIdx++
op.curOutputBucketIdx++
}
})
if op.curOutputBucketIdx >= len(op.buckets) {
op.state = hashAggregatorDone
}
op.output.SetLength(curOutputIdx)
return op.output
case hashAggregatorDone:
return coldata.ZeroBatch
default:
colexecerror.InternalError(errors.AssertionFailedf("hash aggregator in unhandled state"))
// This code is unreachable, but the compiler cannot infer that.
return nil
}
}
}
func (op *hashAggregator) setupScratchSlices(numBuffered int) {
if len(op.scratch.eqChains) < numBuffered {
op.scratch.eqChains = make([][]int, numBuffered)
op.scratch.intSlice = make([]int, numBuffered)
op.scratch.anotherIntSlice = make([]int, numBuffered)
}
}
// onlineAgg groups all tuples in b into equality chains, then probes the
// heads of those chains against already existing groups, aggregates matched
// chains into the corresponding buckets and creates new buckets for new
// aggregation groups.
//
// Let's go through an example of how this function works: our input stream
// contains the following tuples:
// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}.
// (Note that negative values are chosen in order to visually distinguish them
// from the IDs that we'll be working with below.)
// We will use coldata.BatchSize() == 4 and let's assume that we will use a
// simple hash function h(i) = i % 2 with two buckets in the hash table.
//
// I. we get a batch [-3, -3, -2, -1].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 1, 1, 0, 1]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [3, 1] (length of first == # of hash buckets)
// ProbeScratch.next = [reserved, 2, 4, 0, 0]
// (Note that we have a hash collision in the bucket with hash 1.)
// c) find "equality" buckets (populate HeadID):
// ProbeScratch.HeadID = [1, 1, 3, 4]
// (This means that tuples at position 0 and 1 are the same, and the
// tuple at position HeadID-1 is the head of the equality chain.)
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 1]
// eqChains[1] = [2]
// eqChains[2] = [3]
// The special "heads of equality chains" selection vector is [0, 2, 3].
// 3. we don't have any existing buckets yet, so this step is a noop.
// 4. each of the three equality chains contains tuples from a separate
// aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be a non-zero-length batch.
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building.
op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++
op.scratch.eqChains[eqChainSlot] = op.scratch.eqChains[eqChainSlot][:0]
}
}
if newGroupCount > 0 {
// We have created new buckets, so we need to append the heads of those
// buckets to the hash table.
copy(b.Selection(), newGroupsHeadsSel)
b.SetLength(newGroupCount)
op.ht.AppendAllDistinct(ctx, b)
}
}
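// The helper below is an illustrative sketch (added, not part of the original
// source, and not used by the operator). It reproduces the first/next chain
// layout from the walkthrough above for h(i) = i % 2 with two hash buckets;
// the real colexechash.HashTable computes the same structure in a vectorized
// fashion. For vals = [-3, -3, -2, -1] it yields first = [3, 1] and
// next = [reserved, 2, 4, 0, 0], matching step I.1.b of the walkthrough.
func exampleBuildHashChains(vals []int) (first, next []int) {
	const numBuckets = 2
	// first[h] holds the 1-based index of the head tuple of bucket h's chain.
	first = make([]int, numBuckets)
	// next[i+1] holds the 1-based index of the tuple following tuple i in its
	// chain; slot 0 is reserved, and the value 0 marks the end of a chain.
	next = make([]int, len(vals)+1)
	for i := len(vals) - 1; i >= 0; i-- {
		// Non-negative modulus so that negative values hash like in the
		// walkthrough.
		h := ((vals[i] % numBuckets) + numBuckets) % numBuckets
		next[i+1] = first[h]
		first[h] = i + 1
	}
	return first, next
}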
func (op *hashAggregator) ExportBuffered(ctx context.Context, _ colexecop.Operator) coldata.Batch {
if !op.inputTrackingState.zeroBatchEnqueued {
// Per the contract of the spilling queue, we need to append a
// zero-length batch.
op.inputTrackingState.tuples.Enqueue(ctx, coldata.ZeroBatch)
op.inputTrackingState.zeroBatchEnqueued = true
}
batch, err := op.inputTrackingState.tuples.Dequeue(ctx)
if err != nil {
colexecerror.InternalError(err)
}
return batch
}
func (op *hashAggregator) Reset(ctx context.Context) {
if r, ok := op.Input.(colexecop.Resetter); ok {
r.Reset(ctx)
}
op.bufferingState.tuples.ResetInternalBatch()
op.bufferingState.pendingBatch = nil
op.bufferingState.unprocessedIdx = 0
if op.numPreviouslyCreatedBuckets < len(op.buckets) {
op.numPreviouslyCreatedBuckets = len(op.buckets)
}
// Set up buckets for reuse.
op.buckets = op.buckets[:0]
op.ht.Reset(ctx)
if op.inputTrackingState.tuples != nil {
if err := op.inputTrackingState.tuples.Close(ctx); err != nil {
colexecerror.InternalError(err)
}
op.inputTrackingState.zeroBatchEnqueued = false
}
op.curOutputBucketIdx = 0
op.state = hashAggregatorBuffering
}
func (op *hashAggregator) Close(ctx context.Context) error | {
var retErr error
if op.inputTrackingState.tuples != nil {
retErr = op.inputTrackingState.tuples.Close(ctx)
}
if err := op.toClose.Close(ctx); err != nil {
retErr = err
}
return retErr
} | identifier_body |
|
hash_aggregator.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"math"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/colconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// hashAggregatorState represents the state of the hash aggregator operator.
type hashAggregatorState int
const (
// hashAggregatorBuffering is the state in which the hashAggregator reads
// the batches from the input and buffers them up. Once the number of
// buffered tuples reaches maxBuffered or the input has been fully exhausted,
// the hashAggregator transitions to hashAggregatorAggregating state.
hashAggregatorBuffering hashAggregatorState = iota
// hashAggregatorAggregating is the state in which the hashAggregator is
// performing the aggregation on the buffered tuples. If the input has been
// fully exhausted and the buffer is empty, the hashAggregator transitions
// to hashAggregatorOutputting state.
hashAggregatorAggregating
// hashAggregatorOutputting is the state in which the hashAggregator is
// writing its aggregation results to the output buffer.
hashAggregatorOutputting
// hashAggregatorDone is the state in which the hashAggregator has finished
// writing to the output buffer.
hashAggregatorDone
)
// hashAggregator is an operator that performs aggregation based on the
// specified grouping columns. This operator performs aggregation in online
// fashion. It reads from the input one batch at a time, groups all tuples into
// the equality chains, probes heads of those chains against already existing
// buckets and creates new buckets for new groups. After the input is
// exhausted, the operator begins to write the result into an output buffer.
// The output row ordering of this operator is arbitrary.
// Note that throughout this file "buckets" and "groups" mean the same thing
// and are used interchangeably.
type hashAggregator struct {
colexecop.OneInputNode
allocator *colmem.Allocator
spec *execinfrapb.AggregatorSpec
aggHelper aggregatorHelper
inputTypes []*types.T
outputTypes []*types.T
inputArgsConverter *colconv.VecToDatumConverter
// maxBuffered determines the maximum number of tuples that are buffered up
// for aggregation at once.
maxBuffered int
bufferingState struct {
// tuples contains the tuples that we have buffered up for aggregation.
// Its length will not exceed maxBuffered.
tuples *colexecutils.AppendOnlyBufferedBatch
// pendingBatch stores the last read batch from the input that hasn't
// been fully processed yet.
pendingBatch coldata.Batch
// unprocessedIdx is the index of the first tuple in pendingBatch that
// hasn't been processed yet.
unprocessedIdx int
}
// numPreviouslyCreatedBuckets tracks the maximum number of buckets that
// have been created throughout the lifetime of this hashAggregator. This
// matters if the hashAggregator is reset - we reuse the same buckets on the
// next run.
// If non-zero, all buckets available to use are in
// buckets[len(buckets):numPreviouslyCreatedBuckets] range. Note that
// cap(buckets) might be higher than this number, but all buckets past
// numPreviouslyCreatedBuckets haven't been instantiated properly, so
// cap(buckets) should be ignored.
numPreviouslyCreatedBuckets int
// buckets contains all aggregation groups that we have so far. There is
// 1-to-1 mapping between buckets[i] and ht.Vals[i].
buckets []*aggBucket
// ht stores tuples that are "heads" of the corresponding aggregation
// groups ("head" here means the tuple that was first seen from the group).
ht *colexechash.HashTable
// state stores the current state of hashAggregator.
state hashAggregatorState
scratch struct {
// eqChains stores the chains of tuples from the current batch that are
// equal on the grouping columns (meaning that all tuples from the
// batch will be included into one of these chains). These chains must
// be set to zero length once the batch has been processed so that the
// memory could be reused.
eqChains [][]int
// intSlice and anotherIntSlice are simply scratch int slices that are
// reused for several purposes by the hashAggregator.
intSlice []int
anotherIntSlice []int
}
// inputTrackingState tracks all of the input tuples, which is needed in
// order to fall back to the external hash aggregator.
inputTrackingState struct {
tuples *colexecutils.SpillingQueue
zeroBatchEnqueued bool
}
// curOutputBucketIdx tracks the index in buckets to be flushed next when
// populating the output.
curOutputBucketIdx int
output coldata.Batch
aggFnsAlloc *colexecagg.AggregateFuncsAlloc
hashAlloc aggBucketAlloc
datumAlloc rowenc.DatumAlloc
toClose colexecop.Closers
}
var _ colexecop.ResettableOperator = &hashAggregator{}
var _ colexecop.BufferingInMemoryOperator = &hashAggregator{}
var _ colexecop.ClosableOperator = &hashAggregator{}
// hashAggregatorAllocSize determines the allocation size used by the hash
// aggregator's allocators. This number was chosen after running benchmarks of
// 'sum' aggregation on ints and decimals with varying group sizes (powers of 2
// from 1 to 4096).
const hashAggregatorAllocSize = 128
// NewHashAggregator creates a hash aggregator on the given grouping columns.
// The input specifications to this function are the same as that of the
// NewOrderedAggregator function.
// newSpillingQueueArgs - when non-nil - specifies the arguments with which to
// instantiate a SpillingQueue that will be used to keep all of the input
// tuples in case the in-memory hash aggregator needs to fall back to the
// disk-backed operator. Pass in nil in order to not track the input tuples.
func NewHashAggregator(
args *colexecagg.NewAggregatorArgs, newSpillingQueueArgs *colexecutils.NewSpillingQueueArgs,
) (colexecop.ResettableOperator, error) {
aggFnsAlloc, inputArgsConverter, toClose, err := colexecagg.NewAggregateFuncsAlloc(
args, hashAggregatorAllocSize, true, /* isHashAgg */
)
// We want this number to be coldata.MaxBatchSize, but then we would lose
// some test coverage due to disabling of the randomization of the batch
// size, so we, instead, use 4 x coldata.BatchSize() (which ends up being
// coldata.MaxBatchSize in non-test environment).
maxBuffered := 4 * coldata.BatchSize()
if maxBuffered > coldata.MaxBatchSize {
// When randomizing coldata.BatchSize() in tests we might exceed
// coldata.MaxBatchSize, so we need to shrink it.
maxBuffered = coldata.MaxBatchSize
}
hashAgg := &hashAggregator{
OneInputNode: colexecop.NewOneInputNode(args.Input),
allocator: args.Allocator,
spec: args.Spec,
state: hashAggregatorBuffering,
inputTypes: args.InputTypes,
outputTypes: args.OutputTypes,
inputArgsConverter: inputArgsConverter,
maxBuffered: maxBuffered,
toClose: toClose,
aggFnsAlloc: aggFnsAlloc,
hashAlloc: aggBucketAlloc{allocator: args.Allocator},
}
hashAgg.bufferingState.tuples = colexecutils.NewAppendOnlyBufferedBatch(args.Allocator, args.InputTypes, nil /* colsToStore */)
hashAgg.datumAlloc.AllocSize = hashAggregatorAllocSize
hashAgg.aggHelper = newAggregatorHelper(args, &hashAgg.datumAlloc, true /* isHashAgg */, hashAgg.maxBuffered)
if newSpillingQueueArgs != nil {
hashAgg.inputTrackingState.tuples = colexecutils.NewSpillingQueue(newSpillingQueueArgs)
}
return hashAgg, err
}
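// Editor's note: the helper below is an illustrative sketch that is not part
// of the original file; it shows how a caller might construct the in-memory
// hash aggregator without input tracking, and its name is an assumption.
func newHashAggregatorWithoutTracking(
	args *colexecagg.NewAggregatorArgs,
) (colexecop.ResettableOperator, error) {
	// Passing nil for newSpillingQueueArgs means the input tuples are not
	// tracked, so no fallback to the disk-backed aggregator is possible.
	return NewHashAggregator(args, nil /* newSpillingQueueArgs */)
}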
func (op *hashAggregator) Init() {
op.Input.Init()
// These numbers were chosen after running the micro-benchmarks and relevant
// TPCH queries using tpchvec/bench.
const hashTableLoadFactor = 0.1
const hashTableNumBuckets = 256
op.ht = colexechash.NewHashTable(
op.allocator,
hashTableLoadFactor,
hashTableNumBuckets,
op.inputTypes,
op.spec.GroupCols,
true, /* allowNullEquality */
colexechash.HashTableDistinctBuildMode,
colexechash.HashTableDefaultProbeMode,
)
}
func (op *hashAggregator) Next(ctx context.Context) coldata.Batch {
for {
switch op.state {
case hashAggregatorBuffering:
if op.bufferingState.pendingBatch != nil && op.bufferingState.unprocessedIdx < op.bufferingState.pendingBatch.Length() {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx, op.bufferingState.pendingBatch.Length(),
)
})
}
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx = op.Input.Next(ctx), 0
n := op.bufferingState.pendingBatch.Length()
if op.inputTrackingState.tuples != nil {
op.inputTrackingState.tuples.Enqueue(ctx, op.bufferingState.pendingBatch)
op.inputTrackingState.zeroBatchEnqueued = n == 0
}
if n == 0 {
// This is the last input batch.
if op.bufferingState.tuples.Length() == 0 {
// There are currently no buffered tuples to perform the
// aggregation on.
if len(op.buckets) == 0 {
// We don't have any buckets which means that there were
// no input tuples whatsoever, so we can transition to
// finished state right away.
op.state = hashAggregatorDone
} else {
// There are some buckets, so we proceed to the
// outputting state.
op.state = hashAggregatorOutputting
}
} else {
// There are some buffered tuples on which we need to run
// the aggregation.
op.state = hashAggregatorAggregating
}
continue
}
toBuffer := n
if op.bufferingState.tuples.Length()+toBuffer > op.maxBuffered {
toBuffer = op.maxBuffered - op.bufferingState.tuples.Length()
}
if toBuffer > 0 {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(op.bufferingState.pendingBatch, 0 /* startIdx */, toBuffer)
})
op.bufferingState.unprocessedIdx = toBuffer
}
if op.bufferingState.tuples.Length() == op.maxBuffered {
op.state = hashAggregatorAggregating
continue
}
case hashAggregatorAggregating:
op.inputArgsConverter.ConvertBatch(op.bufferingState.tuples)
op.onlineAgg(ctx, op.bufferingState.tuples)
if op.bufferingState.pendingBatch.Length() == 0 {
if len(op.buckets) == 0 {
op.state = hashAggregatorDone
} else {
op.state = hashAggregatorOutputting
}
continue
}
op.bufferingState.tuples.ResetInternalBatch()
op.state = hashAggregatorBuffering
case hashAggregatorOutputting:
// Note that ResetMaybeReallocate truncates the requested capacity
// at coldata.BatchSize(), so we can just try asking for
// len(op.buckets) capacity. Note that in hashAggregatorOutputting
// state we always have at least 1 bucket.
//
// For now, we don't enforce any footprint-based memory limit.
// TODO(yuzefovich): refactor this.
const maxBatchMemSize = math.MaxInt64
op.output, _ = op.allocator.ResetMaybeReallocate(
op.outputTypes, op.output, len(op.buckets), maxBatchMemSize,
)
curOutputIdx := 0
op.allocator.PerformOperation(op.output.ColVecs(), func() {
for curOutputIdx < op.output.Capacity() && op.curOutputBucketIdx < len(op.buckets) {
bucket := op.buckets[op.curOutputBucketIdx]
for fnIdx, fn := range bucket.fns {
fn.SetOutput(op.output.ColVec(fnIdx))
fn.Flush(curOutputIdx)
}
curOutputIdx++
op.curOutputBucketIdx++
}
})
if op.curOutputBucketIdx >= len(op.buckets) |
op.output.SetLength(curOutputIdx)
return op.output
case hashAggregatorDone:
return coldata.ZeroBatch
default:
colexecerror.InternalError(errors.AssertionFailedf("hash aggregator in unhandled state"))
// This code is unreachable, but the compiler cannot infer that.
return nil
}
}
}
func (op *hashAggregator) setupScratchSlices(numBuffered int) {
if len(op.scratch.eqChains) < numBuffered {
op.scratch.eqChains = make([][]int, numBuffered)
op.scratch.intSlice = make([]int, numBuffered)
op.scratch.anotherIntSlice = make([]int, numBuffered)
}
}
// onlineAgg groups all tuples in b into equality chains, then probes the
// heads of those chains against already existing groups, aggregates matched
// chains into the corresponding buckets and creates new buckets for new
// aggregation groups.
//
// Let's go through an example of how this function works: our input stream
// contains the following tuples:
// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}.
// (Note that negative values are chosen in order to visually distinguish them
// from the IDs that we'll be working with below.)
// We will use coldata.BatchSize() == 4 and let's assume that we will use a
// simple hash function h(i) = i % 2 with two buckets in the hash table.
//
// I. we get a batch [-3, -3, -2, -1].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 1, 1, 0, 1]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [3, 1] (length of first == # of hash buckets)
// ProbeScratch.next = [reserved, 2, 4, 0, 0]
// (Note that we have a hash collision in the bucket with hash 1.)
// c) find "equality" buckets (populate HeadID):
// ProbeScratch.HeadID = [1, 1, 3, 4]
// (This means that tuples at position 0 and 1 are the same, and the
// tuple at position HeadID-1 is the head of the equality chain.)
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 1]
// eqChains[1] = [2]
// eqChains[2] = [3]
// The special "heads of equality chains" selection vector is [0, 2, 3].
// 3. we don't have any existing buckets yet, so this step is a noop.
// 4. each of the three equality chains contains tuples from a separate
// aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be a non-zero-length batch.
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building.
op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++
op.scratch.eqChains[eqChainSlot] = op.scratch.eqChains[eqChainSlot][:0]
}
}
if newGroupCount > 0 {
// We have created new buckets, so we need to append the heads of those
// buckets to the hash table.
copy(b.Selection(), newGroupsHeadsSel)
b.SetLength(newGroupCount)
op.ht.AppendAllDistinct(ctx, b)
}
}
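// Editor's note: the function below is a standalone illustration, not part of
// the original file. It mirrors the "divide all tuples into the equality
// chains based on HeadID" step from the walkthrough above using plain int
// slices instead of the HashTable scratch state; all names are invented.
func eqChainsFromHeadIDs(headIDs []int) (chains [][]int, headsSel []int) {
	chainForHead := make(map[int]int) // HeadID -> slot in chains
	for tupleIdx, headID := range headIDs {
		slot, ok := chainForHead[headID]
		if !ok {
			slot = len(chains)
			chainForHead[headID] = slot
			chains = append(chains, nil)
			// HeadID is 1-based, so the head tuple sits at position HeadID-1.
			headsSel = append(headsSel, headID-1)
		}
		chains[slot] = append(chains[slot], tupleIdx)
	}
	// For HeadID = [1, 1, 3, 4] this yields chains [[0 1] [2] [3]] and
	// headsSel [0 2 3], matching batch I of the example above.
	return chains, headsSel
}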
func (op *hashAggregator) ExportBuffered(ctx context.Context, _ colexecop.Operator) coldata.Batch {
if !op.inputTrackingState.zeroBatchEnqueued {
// Per the contract of the spilling queue, we need to append a
// zero-length batch.
op.inputTrackingState.tuples.Enqueue(ctx, coldata.ZeroBatch)
op.inputTrackingState.zeroBatchEnqueued = true
}
batch, err := op.inputTrackingState.tuples.Dequeue(ctx)
if err != nil {
colexecerror.InternalError(err)
}
return batch
}
func (op *hashAggregator) Reset(ctx context.Context) {
if r, ok := op.Input.(colexecop.Resetter); ok {
r.Reset(ctx)
}
op.bufferingState.tuples.ResetInternalBatch()
op.bufferingState.pendingBatch = nil
op.bufferingState.unprocessedIdx = 0
if op.numPreviouslyCreatedBuckets < len(op.buckets) {
op.numPreviouslyCreatedBuckets = len(op.buckets)
}
// Set up buckets for reuse.
op.buckets = op.buckets[:0]
op.ht.Reset(ctx)
if op.inputTrackingState.tuples != nil {
if err := op.inputTrackingState.tuples.Close(ctx); err != nil {
colexecerror.InternalError(err)
}
op.inputTrackingState.zeroBatchEnqueued = false
}
op.curOutputBucketIdx = 0
op.state = hashAggregatorBuffering
}
func (op *hashAggregator) Close(ctx context.Context) error {
var retErr error
if op.inputTrackingState.tuples != nil {
retErr = op.inputTrackingState.tuples.Close(ctx)
}
if err := op.toClose.Close(ctx); err != nil {
retErr = err
}
return retErr
}
| {
op.state = hashAggregatorDone
} | conditional_block |
hash_aggregator.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"math"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/colconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// hashAggregatorState represents the state of the hash aggregator operator.
type hashAggregatorState int
const (
// hashAggregatorBuffering is the state in which the hashAggregator reads
// the batches from the input and buffers them up. Once the number of
// buffered tuples reaches maxBuffered or the input has been fully exhausted,
// the hashAggregator transitions to hashAggregatorAggregating state.
hashAggregatorBuffering hashAggregatorState = iota
// hashAggregatorAggregating is the state in which the hashAggregator is
// performing the aggregation on the buffered tuples. If the input has been
// fully exhausted and the buffer is empty, the hashAggregator transitions
// to hashAggregatorOutputting state.
hashAggregatorAggregating
// hashAggregatorOutputting is the state in which the hashAggregator is
// writing its aggregation results to the output buffer.
hashAggregatorOutputting
// hashAggregatorDone is the state in which the hashAggregator has finished
// writing to the output buffer.
hashAggregatorDone
)
// hashAggregator is an operator that performs aggregation based on the
// specified grouping columns. This operator performs aggregation in online
// fashion. It reads from the input one batch at a time, groups all tuples into
// the equality chains, probes heads of those chains against already existing
// buckets and creates new buckets for new groups. After the input is
// exhausted, the operator begins to write the result into an output buffer.
// The output row ordering of this operator is arbitrary.
// Note that throughout this file "buckets" and "groups" mean the same thing
// and are used interchangeably.
type hashAggregator struct {
colexecop.OneInputNode
allocator *colmem.Allocator
spec *execinfrapb.AggregatorSpec
aggHelper aggregatorHelper
inputTypes []*types.T
outputTypes []*types.T
inputArgsConverter *colconv.VecToDatumConverter
// maxBuffered determines the maximum number of tuples that are buffered up
// for aggregation at once.
maxBuffered int
bufferingState struct {
// tuples contains the tuples that we have buffered up for aggregation.
// Its length will not exceed maxBuffered.
tuples *colexecutils.AppendOnlyBufferedBatch
// pendingBatch stores the last read batch from the input that hasn't
// been fully processed yet.
pendingBatch coldata.Batch
// unprocessedIdx is the index of the first tuple in pendingBatch that
// hasn't been processed yet.
unprocessedIdx int
}
// numPreviouslyCreatedBuckets tracks the maximum number of buckets that
// have been created throughout the lifetime of this hashAggregator. This
// matters if the hashAggregator is reset - we reuse the same buckets on the
// next run.
// If non-zero, all buckets available to use are in
// buckets[len(buckets):numPreviouslyCreatedBuckets] range. Note that
// cap(buckets) might be higher than this number, but all buckets past
// numPreviouslyCreatedBuckets haven't been instantiated properly, so
// cap(buckets) should be ignored.
numPreviouslyCreatedBuckets int
// buckets contains all aggregation groups that we have so far. There is
// 1-to-1 mapping between buckets[i] and ht.Vals[i].
buckets []*aggBucket
// ht stores tuples that are "heads" of the corresponding aggregation
// groups ("head" here means the tuple that was first seen from the group).
ht *colexechash.HashTable
// state stores the current state of hashAggregator.
state hashAggregatorState
scratch struct {
// eqChains stores the chains of tuples from the current batch that are
// equal on the grouping columns (meaning that all tuples from the
// batch will be included into one of these chains). These chains must
// be set to zero length once the batch has been processed so that the
// memory could be reused.
eqChains [][]int
// intSlice and anotherIntSlice are simply scratch int slices that are
// reused for several purposes by the hashAggregator.
intSlice []int
anotherIntSlice []int
}
// inputTrackingState tracks all of the input tuples, which is needed in
// order to fall back to the external hash aggregator.
inputTrackingState struct {
tuples *colexecutils.SpillingQueue
zeroBatchEnqueued bool
}
// curOutputBucketIdx tracks the index in buckets to be flushed next when
// populating the output.
curOutputBucketIdx int
output coldata.Batch
aggFnsAlloc *colexecagg.AggregateFuncsAlloc
hashAlloc aggBucketAlloc
datumAlloc rowenc.DatumAlloc
toClose colexecop.Closers
}
var _ colexecop.ResettableOperator = &hashAggregator{}
var _ colexecop.BufferingInMemoryOperator = &hashAggregator{}
var _ colexecop.ClosableOperator = &hashAggregator{}
// hashAggregatorAllocSize determines the allocation size used by the hash
// aggregator's allocators. This number was chosen after running benchmarks of
// 'sum' aggregation on ints and decimals with varying group sizes (powers of 2
// from 1 to 4096).
const hashAggregatorAllocSize = 128
// NewHashAggregator creates a hash aggregator on the given grouping columns.
// The input specifications to this function are the same as that of the
// NewOrderedAggregator function.
// newSpillingQueueArgs - when non-nil - specifies the arguments with which to
// instantiate a SpillingQueue that will be used to keep all of the input
// tuples in case the in-memory hash aggregator needs to fall back to the
// disk-backed operator. Pass in nil in order to not track the input tuples.
func NewHashAggregator(
args *colexecagg.NewAggregatorArgs, newSpillingQueueArgs *colexecutils.NewSpillingQueueArgs,
) (colexecop.ResettableOperator, error) {
aggFnsAlloc, inputArgsConverter, toClose, err := colexecagg.NewAggregateFuncsAlloc(
args, hashAggregatorAllocSize, true, /* isHashAgg */
)
// We want this number to be coldata.MaxBatchSize, but then we would lose
// some test coverage due to disabling of the randomization of the batch
// size, so we, instead, use 4 x coldata.BatchSize() (which ends up being
// coldata.MaxBatchSize in non-test environment).
maxBuffered := 4 * coldata.BatchSize()
if maxBuffered > coldata.MaxBatchSize {
// When randomizing coldata.BatchSize() in tests we might exceed
// coldata.MaxBatchSize, so we need to shrink it.
maxBuffered = coldata.MaxBatchSize
}
hashAgg := &hashAggregator{
OneInputNode: colexecop.NewOneInputNode(args.Input),
allocator: args.Allocator,
spec: args.Spec,
state: hashAggregatorBuffering,
inputTypes: args.InputTypes,
outputTypes: args.OutputTypes,
inputArgsConverter: inputArgsConverter,
maxBuffered: maxBuffered,
toClose: toClose,
aggFnsAlloc: aggFnsAlloc,
hashAlloc: aggBucketAlloc{allocator: args.Allocator},
}
hashAgg.bufferingState.tuples = colexecutils.NewAppendOnlyBufferedBatch(args.Allocator, args.InputTypes, nil /* colsToStore */)
hashAgg.datumAlloc.AllocSize = hashAggregatorAllocSize
hashAgg.aggHelper = newAggregatorHelper(args, &hashAgg.datumAlloc, true /* isHashAgg */, hashAgg.maxBuffered)
if newSpillingQueueArgs != nil {
hashAgg.inputTrackingState.tuples = colexecutils.NewSpillingQueue(newSpillingQueueArgs)
}
return hashAgg, err
}
func (op *hashAggregator) Init() {
op.Input.Init()
// These numbers were chosen after running the micro-benchmarks and relevant
// TPCH queries using tpchvec/bench.
const hashTableLoadFactor = 0.1
const hashTableNumBuckets = 256
op.ht = colexechash.NewHashTable(
op.allocator,
hashTableLoadFactor,
hashTableNumBuckets,
op.inputTypes,
op.spec.GroupCols,
true, /* allowNullEquality */
colexechash.HashTableDistinctBuildMode,
colexechash.HashTableDefaultProbeMode,
)
}
func (op *hashAggregator) Next(ctx context.Context) coldata.Batch {
for {
switch op.state {
case hashAggregatorBuffering:
if op.bufferingState.pendingBatch != nil && op.bufferingState.unprocessedIdx < op.bufferingState.pendingBatch.Length() {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx, op.bufferingState.pendingBatch.Length(),
)
})
}
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx = op.Input.Next(ctx), 0
n := op.bufferingState.pendingBatch.Length()
if op.inputTrackingState.tuples != nil {
op.inputTrackingState.tuples.Enqueue(ctx, op.bufferingState.pendingBatch)
op.inputTrackingState.zeroBatchEnqueued = n == 0
}
if n == 0 {
// This is the last input batch.
if op.bufferingState.tuples.Length() == 0 {
// There are currently no buffered tuples to perform the
// aggregation on.
if len(op.buckets) == 0 {
// We don't have any buckets which means that there were
// no input tuples whatsoever, so we can transition to
// finished state right away.
op.state = hashAggregatorDone
} else {
// There are some buckets, so we proceed to the
// outputting state.
op.state = hashAggregatorOutputting
}
} else {
// There are some buffered tuples on which we need to run
// the aggregation.
op.state = hashAggregatorAggregating
}
continue
}
toBuffer := n
if op.bufferingState.tuples.Length()+toBuffer > op.maxBuffered {
toBuffer = op.maxBuffered - op.bufferingState.tuples.Length()
}
if toBuffer > 0 {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(op.bufferingState.pendingBatch, 0 /* startIdx */, toBuffer)
})
op.bufferingState.unprocessedIdx = toBuffer
}
if op.bufferingState.tuples.Length() == op.maxBuffered {
op.state = hashAggregatorAggregating
continue
}
case hashAggregatorAggregating:
op.inputArgsConverter.ConvertBatch(op.bufferingState.tuples)
op.onlineAgg(ctx, op.bufferingState.tuples)
if op.bufferingState.pendingBatch.Length() == 0 {
if len(op.buckets) == 0 {
op.state = hashAggregatorDone
} else {
op.state = hashAggregatorOutputting
}
continue
}
op.bufferingState.tuples.ResetInternalBatch()
op.state = hashAggregatorBuffering
case hashAggregatorOutputting:
// Note that ResetMaybeReallocate truncates the requested capacity
// at coldata.BatchSize(), so we can just try asking for
// len(op.buckets) capacity. Note that in hashAggregatorOutputting
// state we always have at least 1 bucket.
//
// For now, we don't enforce any footprint-based memory limit.
// TODO(yuzefovich): refactor this.
const maxBatchMemSize = math.MaxInt64
op.output, _ = op.allocator.ResetMaybeReallocate(
op.outputTypes, op.output, len(op.buckets), maxBatchMemSize,
)
curOutputIdx := 0
op.allocator.PerformOperation(op.output.ColVecs(), func() {
for curOutputIdx < op.output.Capacity() && op.curOutputBucketIdx < len(op.buckets) {
bucket := op.buckets[op.curOutputBucketIdx]
for fnIdx, fn := range bucket.fns {
fn.SetOutput(op.output.ColVec(fnIdx))
fn.Flush(curOutputIdx)
}
curOutputIdx++
op.curOutputBucketIdx++
}
})
if op.curOutputBucketIdx >= len(op.buckets) {
op.state = hashAggregatorDone
}
op.output.SetLength(curOutputIdx)
return op.output
case hashAggregatorDone:
return coldata.ZeroBatch
default:
colexecerror.InternalError(errors.AssertionFailedf("hash aggregator in unhandled state"))
// This code is unreachable, but the compiler cannot infer that.
return nil
}
}
}
func (op *hashAggregator) setupScratchSlices(numBuffered int) {
if len(op.scratch.eqChains) < numBuffered {
op.scratch.eqChains = make([][]int, numBuffered)
op.scratch.intSlice = make([]int, numBuffered)
op.scratch.anotherIntSlice = make([]int, numBuffered)
}
}
// onlineAgg groups all tuples in b into equality chains, then probes the
// heads of those chains against already existing groups, aggregates matched
// chains into the corresponding buckets and creates new buckets for new
// aggregation groups.
//
// Let's go through an example of how this function works: our input stream
// contains the following tuples:
// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}.
// (Note that negative values are chosen in order to visually distinguish them
// from the IDs that we'll be working with below.)
// We will use coldata.BatchSize() == 4 and let's assume that we will use a
// simple hash function h(i) = i % 2 with two buckets in the hash table.
//
// I. we get a batch [-3, -3, -2, -1].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 1, 1, 0, 1]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [3, 1] (length of first == # of hash buckets)
// ProbeScratch.next = [reserved, 2, 4, 0, 0]
// (Note that we have a hash collision in the bucket with hash 1.)
// c) find "equality" buckets (populate HeadID):
// ProbeScratch.HeadID = [1, 1, 3, 4]
// (This means that tuples at position 0 and 1 are the same, and the
// tuple at position HeadID-1 is the head of the equality chain.)
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 1]
// eqChains[1] = [2]
// eqChains[2] = [3]
// The special "heads of equality chains" selection vector is [0, 2, 3].
// 3. we don't have any existing buckets yet, so this step is a noop.
// 4. each of the three equality chains contains tuples from a separate
// aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be a non-zero-length batch.
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building. | op.scratch.eqChains[eqChainSlot] = op.scratch.eqChains[eqChainSlot][:0]
}
}
if newGroupCount > 0 {
// We have created new buckets, so we need to append the heads of those
// buckets to the hash table.
copy(b.Selection(), newGroupsHeadsSel)
b.SetLength(newGroupCount)
op.ht.AppendAllDistinct(ctx, b)
}
}
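// Editor's note: the helper below is an illustrative sketch that is not part
// of the original file; it isolates the bucket-reuse pattern from Step 4
// above into a plain helper, and its name and signature are assumptions.
func reuseOrAllocBucket(
	buckets []*aggBucket, numPreviouslyCreated int, alloc func() *aggBucket,
) ([]*aggBucket, *aggBucket) {
	if nextBucketIdx := len(buckets); numPreviouslyCreated > nextBucketIdx {
		// A bucket from a previous run is still available past the current
		// length, so extend the slice into that capacity and reset the bucket.
		buckets = buckets[:nextBucketIdx+1]
		bucket := buckets[nextBucketIdx]
		bucket.reset()
		return buckets, bucket
	}
	// Otherwise allocate a brand new bucket and append it.
	bucket := alloc()
	return append(buckets, bucket), bucket
}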
func (op *hashAggregator) ExportBuffered(ctx context.Context, _ colexecop.Operator) coldata.Batch {
if !op.inputTrackingState.zeroBatchEnqueued {
// Per the contract of the spilling queue, we need to append a
// zero-length batch.
op.inputTrackingState.tuples.Enqueue(ctx, coldata.ZeroBatch)
op.inputTrackingState.zeroBatchEnqueued = true
}
batch, err := op.inputTrackingState.tuples.Dequeue(ctx)
if err != nil {
colexecerror.InternalError(err)
}
return batch
}
func (op *hashAggregator) Reset(ctx context.Context) {
if r, ok := op.Input.(colexecop.Resetter); ok {
r.Reset(ctx)
}
op.bufferingState.tuples.ResetInternalBatch()
op.bufferingState.pendingBatch = nil
op.bufferingState.unprocessedIdx = 0
if op.numPreviouslyCreatedBuckets < len(op.buckets) {
op.numPreviouslyCreatedBuckets = len(op.buckets)
}
// Set up buckets for reuse.
op.buckets = op.buckets[:0]
op.ht.Reset(ctx)
if op.inputTrackingState.tuples != nil {
if err := op.inputTrackingState.tuples.Close(ctx); err != nil {
colexecerror.InternalError(err)
}
op.inputTrackingState.zeroBatchEnqueued = false
}
op.curOutputBucketIdx = 0
op.state = hashAggregatorBuffering
}
func (op *hashAggregator) Close(ctx context.Context) error {
var retErr error
if op.inputTrackingState.tuples != nil {
retErr = op.inputTrackingState.tuples.Close(ctx)
}
if err := op.toClose.Close(ctx); err != nil {
retErr = err
}
return retErr
} | op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++ | random_line_split |
hash_aggregator.go | // Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"math"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/colconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecagg"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexechash"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/colexecutils"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/errors"
)
// hashAggregatorState represents the state of the hash aggregator operator.
type hashAggregatorState int
const (
// hashAggregatorBuffering is the state in which the hashAggregator reads
// the batches from the input and buffers them up. Once the number of
// buffered tuples reaches maxBuffered or the input has been fully exhausted,
// the hashAggregator transitions to hashAggregatorAggregating state.
hashAggregatorBuffering hashAggregatorState = iota
// hashAggregatorAggregating is the state in which the hashAggregator is
// performing the aggregation on the buffered tuples. If the input has been
// fully exhausted and the buffer is empty, the hashAggregator transitions
// to hashAggregatorOutputting state.
hashAggregatorAggregating
// hashAggregatorOutputting is the state in which the hashAggregator is
// writing its aggregation results to the output buffer.
hashAggregatorOutputting
// hashAggregatorDone is the state in which the hashAggregator has finished
// writing to the output buffer.
hashAggregatorDone
)
// hashAggregator is an operator that performs aggregation based on the
// specified grouping columns. This operator performs aggregation in online
// fashion. It reads from the input one batch at a time, groups all tuples into
// the equality chains, probes heads of those chains against already existing
// buckets and creates new buckets for new groups. After the input is
// exhausted, the operator begins to write the result into an output buffer.
// The output row ordering of this operator is arbitrary.
// Note that throughout this file "buckets" and "groups" mean the same thing
// and are used interchangeably.
type hashAggregator struct {
colexecop.OneInputNode
allocator *colmem.Allocator
spec *execinfrapb.AggregatorSpec
aggHelper aggregatorHelper
inputTypes []*types.T
outputTypes []*types.T
inputArgsConverter *colconv.VecToDatumConverter
// maxBuffered determines the maximum number of tuples that are buffered up
// for aggregation at once.
maxBuffered int
bufferingState struct {
// tuples contains the tuples that we have buffered up for aggregation.
// Its length will not exceed maxBuffered.
tuples *colexecutils.AppendOnlyBufferedBatch
// pendingBatch stores the last read batch from the input that hasn't
// been fully processed yet.
pendingBatch coldata.Batch
// unprocessedIdx is the index of the first tuple in pendingBatch that
// hasn't been processed yet.
unprocessedIdx int
}
// numPreviouslyCreatedBuckets tracks the maximum number of buckets that
// have been created throughout the lifetime of this hashAggregator. This
// matters if the hashAggregator is reset - we reuse the same buckets on the
// next run.
// If non-zero, all buckets available to use are in
// buckets[len(buckets):numPreviouslyCreatedBuckets] range. Note that
// cap(buckets) might be higher than this number, but all buckets past
// numPreviouslyCreatedBuckets haven't been instantiated properly, so
// cap(buckets) should be ignored.
numPreviouslyCreatedBuckets int
// buckets contains all aggregation groups that we have so far. There is
// 1-to-1 mapping between buckets[i] and ht.Vals[i].
buckets []*aggBucket
// ht stores tuples that are "heads" of the corresponding aggregation
// groups ("head" here means the tuple that was first seen from the group).
ht *colexechash.HashTable
// state stores the current state of hashAggregator.
state hashAggregatorState
scratch struct {
// eqChains stores the chains of tuples from the current batch that are
// equal on the grouping columns (meaning that all tuples from the
// batch will be included into one of these chains). These chains must
// be set to zero length once the batch has been processed so that the
// memory could be reused.
eqChains [][]int
// intSlice and anotherIntSlice are simply scratch int slices that are
// reused for several purposes by the hashAggregator.
intSlice []int
anotherIntSlice []int
}
// inputTrackingState tracks all of the input tuples, which is needed in
// order to fall back to the external hash aggregator.
inputTrackingState struct {
tuples *colexecutils.SpillingQueue
zeroBatchEnqueued bool
}
// curOutputBucketIdx tracks the index in buckets to be flushed next when
// populating the output.
curOutputBucketIdx int
output coldata.Batch
aggFnsAlloc *colexecagg.AggregateFuncsAlloc
hashAlloc aggBucketAlloc
datumAlloc rowenc.DatumAlloc
toClose colexecop.Closers
}
var _ colexecop.ResettableOperator = &hashAggregator{}
var _ colexecop.BufferingInMemoryOperator = &hashAggregator{}
var _ colexecop.ClosableOperator = &hashAggregator{}
// hashAggregatorAllocSize determines the allocation size used by the hash
// aggregator's allocators. This number was chosen after running benchmarks of
// 'sum' aggregation on ints and decimals with varying group sizes (powers of 2
// from 1 to 4096).
const hashAggregatorAllocSize = 128
// NewHashAggregator creates a hash aggregator on the given grouping columns.
// The input specifications to this function are the same as that of the
// NewOrderedAggregator function.
// newSpillingQueueArgs - when non-nil - specifies the arguments with which to
// instantiate a SpillingQueue that will be used to keep all of the input
// tuples in case the in-memory hash aggregator needs to fall back to the
// disk-backed operator. Pass in nil in order to not track the input tuples.
func NewHashAggregator(
args *colexecagg.NewAggregatorArgs, newSpillingQueueArgs *colexecutils.NewSpillingQueueArgs,
) (colexecop.ResettableOperator, error) {
aggFnsAlloc, inputArgsConverter, toClose, err := colexecagg.NewAggregateFuncsAlloc(
args, hashAggregatorAllocSize, true, /* isHashAgg */
)
// We want this number to be coldata.MaxBatchSize, but then we would lose
// some test coverage due to disabling of the randomization of the batch
// size, so we, instead, use 4 x coldata.BatchSize() (which ends up being
// coldata.MaxBatchSize in non-test environment).
maxBuffered := 4 * coldata.BatchSize()
if maxBuffered > coldata.MaxBatchSize {
// When randomizing coldata.BatchSize() in tests we might exceed
// coldata.MaxBatchSize, so we need to shrink it.
maxBuffered = coldata.MaxBatchSize
}
hashAgg := &hashAggregator{
OneInputNode: colexecop.NewOneInputNode(args.Input),
allocator: args.Allocator,
spec: args.Spec,
state: hashAggregatorBuffering,
inputTypes: args.InputTypes,
outputTypes: args.OutputTypes,
inputArgsConverter: inputArgsConverter,
maxBuffered: maxBuffered,
toClose: toClose,
aggFnsAlloc: aggFnsAlloc,
hashAlloc: aggBucketAlloc{allocator: args.Allocator},
}
hashAgg.bufferingState.tuples = colexecutils.NewAppendOnlyBufferedBatch(args.Allocator, args.InputTypes, nil /* colsToStore */)
hashAgg.datumAlloc.AllocSize = hashAggregatorAllocSize
hashAgg.aggHelper = newAggregatorHelper(args, &hashAgg.datumAlloc, true /* isHashAgg */, hashAgg.maxBuffered)
if newSpillingQueueArgs != nil {
hashAgg.inputTrackingState.tuples = colexecutils.NewSpillingQueue(newSpillingQueueArgs)
}
return hashAgg, err
}
func (op *hashAggregator) Init() {
op.Input.Init()
// These numbers were chosen after running the micro-benchmarks and relevant
// TPCH queries using tpchvec/bench.
const hashTableLoadFactor = 0.1
const hashTableNumBuckets = 256
op.ht = colexechash.NewHashTable(
op.allocator,
hashTableLoadFactor,
hashTableNumBuckets,
op.inputTypes,
op.spec.GroupCols,
true, /* allowNullEquality */
colexechash.HashTableDistinctBuildMode,
colexechash.HashTableDefaultProbeMode,
)
}
func (op *hashAggregator) Next(ctx context.Context) coldata.Batch {
for {
switch op.state {
case hashAggregatorBuffering:
if op.bufferingState.pendingBatch != nil && op.bufferingState.unprocessedIdx < op.bufferingState.pendingBatch.Length() {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx, op.bufferingState.pendingBatch.Length(),
)
})
}
op.bufferingState.pendingBatch, op.bufferingState.unprocessedIdx = op.Input.Next(ctx), 0
n := op.bufferingState.pendingBatch.Length()
if op.inputTrackingState.tuples != nil {
op.inputTrackingState.tuples.Enqueue(ctx, op.bufferingState.pendingBatch)
op.inputTrackingState.zeroBatchEnqueued = n == 0
}
if n == 0 {
// This is the last input batch.
if op.bufferingState.tuples.Length() == 0 {
// There are currently no buffered tuples to perform the
// aggregation on.
if len(op.buckets) == 0 {
// We don't have any buckets which means that there were
// no input tuples whatsoever, so we can transition to
// finished state right away.
op.state = hashAggregatorDone
} else {
// There are some buckets, so we proceed to the
// outputting state.
op.state = hashAggregatorOutputting
}
} else {
// There are some buffered tuples on which we need to run
// the aggregation.
op.state = hashAggregatorAggregating
}
continue
}
toBuffer := n
if op.bufferingState.tuples.Length()+toBuffer > op.maxBuffered {
toBuffer = op.maxBuffered - op.bufferingState.tuples.Length()
}
if toBuffer > 0 {
op.allocator.PerformOperation(op.bufferingState.tuples.ColVecs(), func() {
op.bufferingState.tuples.AppendTuples(op.bufferingState.pendingBatch, 0 /* startIdx */, toBuffer)
})
op.bufferingState.unprocessedIdx = toBuffer
}
if op.bufferingState.tuples.Length() == op.maxBuffered {
op.state = hashAggregatorAggregating
continue
}
case hashAggregatorAggregating:
op.inputArgsConverter.ConvertBatch(op.bufferingState.tuples)
op.onlineAgg(ctx, op.bufferingState.tuples)
if op.bufferingState.pendingBatch.Length() == 0 {
if len(op.buckets) == 0 {
op.state = hashAggregatorDone
} else {
op.state = hashAggregatorOutputting
}
continue
}
op.bufferingState.tuples.ResetInternalBatch()
op.state = hashAggregatorBuffering
case hashAggregatorOutputting:
// Note that ResetMaybeReallocate truncates the requested capacity
// at coldata.BatchSize(), so we can just try asking for
// len(op.buckets) capacity. Note that in hashAggregatorOutputting
// state we always have at least 1 bucket.
//
// For now, we don't enforce any footprint-based memory limit.
// TODO(yuzefovich): refactor this.
const maxBatchMemSize = math.MaxInt64
op.output, _ = op.allocator.ResetMaybeReallocate(
op.outputTypes, op.output, len(op.buckets), maxBatchMemSize,
)
curOutputIdx := 0
op.allocator.PerformOperation(op.output.ColVecs(), func() {
for curOutputIdx < op.output.Capacity() && op.curOutputBucketIdx < len(op.buckets) {
bucket := op.buckets[op.curOutputBucketIdx]
for fnIdx, fn := range bucket.fns {
fn.SetOutput(op.output.ColVec(fnIdx))
fn.Flush(curOutputIdx)
}
curOutputIdx++
op.curOutputBucketIdx++
}
})
if op.curOutputBucketIdx >= len(op.buckets) {
op.state = hashAggregatorDone
}
op.output.SetLength(curOutputIdx)
return op.output
case hashAggregatorDone:
return coldata.ZeroBatch
default:
colexecerror.InternalError(errors.AssertionFailedf("hash aggregator in unhandled state"))
// This code is unreachable, but the compiler cannot infer that.
return nil
}
}
}
func (op *hashAggregator) setupScratchSlices(numBuffered int) {
if len(op.scratch.eqChains) < numBuffered {
op.scratch.eqChains = make([][]int, numBuffered)
op.scratch.intSlice = make([]int, numBuffered)
op.scratch.anotherIntSlice = make([]int, numBuffered)
}
}
// onlineAgg groups all tuples in b into equality chains, then probes the
// heads of those chains against already existing groups, aggregates matched
// chains into the corresponding buckets and creates new buckets for new
// aggregation groups.
//
// Let's go through an example of how this function works: our input stream
// contains the following tuples:
// {-3}, {-3}, {-2}, {-1}, {-4}, {-1}, {-1}, {-4}.
// (Note that negative values are chosen in order to visually distinguish them
// from the IDs that we'll be working with below.)
// We will use coldata.BatchSize() == 4 and let's assume that we will use a
// simple hash function h(i) = i % 2 with two buckets in the hash table.
//
// I. we get a batch [-3, -3, -2, -1].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 1, 1, 0, 1]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [3, 1] (length of first == # of hash buckets)
// ProbeScratch.next = [reserved, 2, 4, 0, 0]
// (Note that we have a hash collision in the bucket with hash 1.)
// c) find "equality" buckets (populate HeadID):
// ProbeScratch.HeadID = [1, 1, 3, 4]
// (This means that tuples at position 0 and 1 are the same, and the
// tuple at position HeadID-1 is the head of the equality chain.)
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 1]
// eqChains[1] = [2]
// eqChains[2] = [3]
// The special "heads of equality chains" selection vector is [0, 2, 3].
// 3. we don't have any existing buckets yet, so this step is a noop.
// 4. each of the three equality chains contains tuples from a separate
// aggregation group, so we perform aggregation on each of them in turn.
// After we do so, we will have three buckets and the hash table will contain
// three tuples (with buckets and tuples corresponding to each other):
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>]
// ht.Vals = [-3, -2, -1].
// We have fully processed the first batch.
//
// II. we get a batch [-4, -1, -1, -4].
// 1. a) compute hash buckets: ProbeScratch.next = [reserved, 0, 1, 1, 0]
// b) build 'next' chains between hash buckets:
// ProbeScratch.first = [1, 2]
// ProbeScratch.next = [reserved, 4, 3, 0, 0]
// c) find "equality" buckets:
// ProbeScratch.HeadID = [1, 2, 2, 1]
// 2. divide all tuples into the equality chains based on HeadID:
// eqChains[0] = [0, 3]
// eqChains[1] = [1, 2]
// The special "heads of equality chains" selection vector is [0, 1].
// 3. probe that special "heads" selection vector against the tuples already
// present in the hash table:
// ProbeScratch.HeadID = [0, 3]
// Value 0 indicates that the first equality chain doesn't have an
// existing bucket, but the second chain does and the ID of its bucket is
// HeadID-1 = 2. We aggregate the second equality chain into that bucket.
// 4. the first equality chain contains tuples from a new aggregation group,
// so we create a new bucket for it and perform the aggregation.
// After we do so, we will have four buckets and the hash table will contain
// four tuples:
// buckets = [<bucket for -3>, <bucket for -2>, <bucket for -1>, <bucket for -4>]
// ht.Vals = [-3, -2, -1, -4].
// We have fully processed the second batch.
//
// We have processed the input fully, so we're ready to emit the output.
//
// NOTE: b *must* be a non-zero-length batch.
func (op *hashAggregator) onlineAgg(ctx context.Context, b coldata.Batch) {
op.setupScratchSlices(b.Length())
inputVecs := b.ColVecs()
// Step 1: find "equality" buckets: we compute the hash buckets for all
// tuples, build 'next' chains between them, and then find equality buckets
// for the tuples.
op.ht.ComputeHashAndBuildChains(ctx, b)
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.ProbeScratch.First, op.ht.ProbeScratch.Next, op.ht.CheckProbeForDistinct,
)
// Step 2: now that we have op.ht.ProbeScratch.HeadID populated we can
// populate the equality chains.
eqChainsCount, eqChainsHeadsSel := op.populateEqChains(b)
b.SetLength(eqChainsCount)
// Make a copy of the selection vector that contains heads of the
// corresponding equality chains because the underlying memory will be
// modified below.
eqChainsHeads := op.scratch.intSlice[:eqChainsCount]
copy(eqChainsHeads, eqChainsHeadsSel)
// Step 3: if we have any existing buckets, we need to probe the heads of
// the equality chains (which the selection vector on b currently contains)
// against the heads of the existing groups.
if len(op.buckets) > 0 {
op.ht.FindBuckets(
b, op.ht.Keys, op.ht.BuildScratch.First, op.ht.BuildScratch.Next, op.ht.CheckBuildForAggregation,
)
for eqChainsSlot, HeadID := range op.ht.ProbeScratch.HeadID[:eqChainsCount] {
if HeadID != 0 {
// Tuples in this equality chain belong to an already existing
// group.
eqChain := op.scratch.eqChains[eqChainsSlot]
bucket := op.buckets[HeadID-1]
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
// We have fully processed this equality chain, so we need to
// reset its length.
op.scratch.eqChains[eqChainsSlot] = op.scratch.eqChains[eqChainsSlot][:0]
}
}
}
// Step 4: now we go over all equality chains and check whether there are
// any that haven't been processed yet (they will be of non-zero length).
// If we find any, we'll create a new bucket for each.
newGroupsHeadsSel := op.scratch.anotherIntSlice[:0]
newGroupCount := 0
for eqChainSlot, eqChain := range op.scratch.eqChains[:eqChainsCount] {
if len(eqChain) > 0 {
// Tuples in this equality chain belong to a new aggregation group,
// so we'll use a new bucket and make sure that the head of this
// equality chain is appended to the hash table in the
// corresponding position.
var bucket *aggBucket
if nextBucketIdx := len(op.buckets); op.numPreviouslyCreatedBuckets > nextBucketIdx {
// We still have a bucket created on the previous run of the
// hash aggregator. Increase the length of op.buckets, using
// previously-allocated capacity, and then reset the bucket for
// reuse.
op.buckets = op.buckets[:nextBucketIdx+1]
bucket = op.buckets[nextBucketIdx]
bucket.reset()
} else {
// Need to allocate a new bucket.
bucket = op.hashAlloc.newAggBucket()
op.buckets = append(op.buckets, bucket)
// We know that all selected tuples belong to the same single
// group, so we can pass 'nil' for the 'groups' argument.
bucket.init(
op.aggFnsAlloc.MakeAggregateFuncs(), op.aggHelper.makeSeenMaps(), nil, /* groups */
)
}
op.aggHelper.performAggregation(
ctx, inputVecs, len(eqChain), eqChain, bucket, nil, /* groups */
)
newGroupsHeadsSel = append(newGroupsHeadsSel, eqChainsHeads[eqChainSlot])
// We need to compact the hash buffer according to the new groups
// head tuples selection vector we're building.
op.ht.ProbeScratch.HashBuffer[newGroupCount] = op.ht.ProbeScratch.HashBuffer[eqChainSlot]
newGroupCount++
op.scratch.eqChains[eqChainSlot] = op.scratch.eqChains[eqChainSlot][:0]
}
}
if newGroupCount > 0 {
// We have created new buckets, so we need to append the heads of those
// buckets to the hash table.
copy(b.Selection(), newGroupsHeadsSel)
b.SetLength(newGroupCount)
op.ht.AppendAllDistinct(ctx, b)
}
}
func (op *hashAggregator) ExportBuffered(ctx context.Context, _ colexecop.Operator) coldata.Batch {
if !op.inputTrackingState.zeroBatchEnqueued {
// Per the contract of the spilling queue, we need to append a
// zero-length batch.
op.inputTrackingState.tuples.Enqueue(ctx, coldata.ZeroBatch)
op.inputTrackingState.zeroBatchEnqueued = true
}
batch, err := op.inputTrackingState.tuples.Dequeue(ctx)
if err != nil {
colexecerror.InternalError(err)
}
return batch
}
func (op *hashAggregator) Reset(ctx context.Context) {
if r, ok := op.Input.(colexecop.Resetter); ok {
r.Reset(ctx)
}
op.bufferingState.tuples.ResetInternalBatch()
op.bufferingState.pendingBatch = nil
op.bufferingState.unprocessedIdx = 0
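// Track the high-water mark of created buckets so that the next run can
// reuse their allocations (see the comparison against
// op.numPreviouslyCreatedBuckets earlier in this file) instead of
// allocating fresh ones.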
if op.numPreviouslyCreatedBuckets < len(op.buckets) {
op.numPreviouslyCreatedBuckets = len(op.buckets)
}
// Set up buckets for reuse.
op.buckets = op.buckets[:0]
op.ht.Reset(ctx)
if op.inputTrackingState.tuples != nil {
if err := op.inputTrackingState.tuples.Close(ctx); err != nil {
colexecerror.InternalError(err)
}
op.inputTrackingState.zeroBatchEnqueued = false
}
op.curOutputBucketIdx = 0
op.state = hashAggregatorBuffering
}
func (op *hashAggregator) | (ctx context.Context) error {
var retErr error
if op.inputTrackingState.tuples != nil {
retErr = op.inputTrackingState.tuples.Close(ctx)
}
if err := op.toClose.Close(ctx); err != nil {
retErr = err
}
return retErr
}
| Close | identifier_name |
watcher.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// shortIDLen is the number of characters in a short Docker container ID
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
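// Illustrative usage sketch (not part of the original file; the socket
// address and the error handling are only examples):
//
//   w, err := NewWatcher("unix:///var/run/docker.sock", nil, false)
//   if err != nil {
//       return err
//   }
//   if err := w.Start(); err != nil {
//       return err
//   }
//   defer w.Stop()
//   for id, c := range w.Containers() {
//       fmt.Printf("%s -> %s\n", id, c.Image)
//   }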
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted container IDs -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) Containers() map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
}
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation) | "container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// Clean up deleted containers after they are not used anymore
func (w *watcher) cleanupWorker() {
for {
// Wait a full period
time.Sleep(w.cleanupTimeout)
select {
case <-w.ctx.Done():
w.stopped.Done()
return
default:
// Check entries for timeout
var toDelete []string
timeout := time.Now().Add(-w.cleanupTimeout)
w.RLock()
for key, lastSeen := range w.deleted {
if lastSeen.Before(timeout) {
logp.Debug("docker", "Removing container %s after cool down timeout", key)
toDelete = append(toDelete, key)
}
}
w.RUnlock()
// Delete timed out entries:
for _, key := range toDelete {
container := w.Container(key)
if container != nil {
w.bus.Publish(bus.Event{
"delete": true,
"container": container,
})
}
}
w.Lock()
for _, key := range toDelete {
delete(w.deleted, key)
delete(w.containers, key)
if w.shortID {
delete(w.containers, key[:shortIDLen])
}
}
w.Unlock()
}
}
}
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
func (w *watcher) ListenStart() bus.Listener {
return w.bus.Subscribe("start")
}
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
func (w *watcher) ListenStop() bus.Listener {
return w.bus.Subscribe("stop")
} | delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true, | random_line_split |
watcher.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// shortIDLen is the number of characters in a short Docker container ID
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted container IDs -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) Containers() map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
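// When short-ID aliases are stored alongside full IDs, skip the
// 12-character keys so that every container is returned exactly once,
// under its full ID.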
for k, v := range w.containers {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
}
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation)
delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true,
"container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// Clean up deleted containers after they are not used anymore
func (w *watcher) cleanupWorker() {
for {
// Wait a full period
time.Sleep(w.cleanupTimeout)
select {
case <-w.ctx.Done():
w.stopped.Done()
return
default:
// Check entries for timeout
var toDelete []string
timeout := time.Now().Add(-w.cleanupTimeout)
w.RLock()
for key, lastSeen := range w.deleted {
if lastSeen.Before(timeout) {
logp.Debug("docker", "Removing container %s after cool down timeout", key)
toDelete = append(toDelete, key)
}
}
w.RUnlock()
// Delete timed out entries:
for _, key := range toDelete {
container := w.Container(key)
if container != nil {
w.bus.Publish(bus.Event{
"delete": true,
"container": container,
})
}
}
w.Lock()
for _, key := range toDelete {
delete(w.deleted, key)
delete(w.containers, key)
if w.shortID {
delete(w.containers, key[:shortIDLen])
}
}
w.Unlock()
}
}
}
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
func (w *watcher) ListenStart() bus.Listener {
return w.bus.Subscribe("start")
}
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
func (w *watcher) ListenStop() bus.Listener | {
return w.bus.Subscribe("stop")
} | identifier_body |
|
watcher.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// shortIDLen is the number of characters in a short Docker container ID
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted container IDs -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) Containers() map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers |
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation)
delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true,
"container": container,
})
}
// Delete
if event.Action == "die" {
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// Clean up deleted containers after they are not used anymore
func (w *watcher) cleanupWorker() {
for {
// Wait a full period
time.Sleep(w.cleanupTimeout)
select {
case <-w.ctx.Done():
w.stopped.Done()
return
default:
// Check entries for timeout
var toDelete []string
timeout := time.Now().Add(-w.cleanupTimeout)
w.RLock()
for key, lastSeen := range w.deleted {
if lastSeen.Before(timeout) {
logp.Debug("docker", "Removing container %s after cool down timeout", key)
toDelete = append(toDelete, key)
}
}
w.RUnlock()
// Delete timed out entries:
for _, key := range toDelete {
container := w.Container(key)
if container != nil {
w.bus.Publish(bus.Event{
"delete": true,
"container": container,
})
}
}
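// The delete events above are published without holding the write
// lock; only now is it taken to drop the entries (and their short-ID
// aliases) from the maps.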
w.Lock()
for _, key := range toDelete {
delete(w.deleted, key)
delete(w.containers, key)
if w.shortID {
delete(w.containers, key[:shortIDLen])
}
}
w.Unlock()
}
}
}
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
func (w *watcher) ListenStart() bus.Listener {
return w.bus.Subscribe("start")
}
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
func (w *watcher) ListenStop() bus.Listener {
return w.bus.Subscribe("stop")
}
| {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
} | conditional_block |
watcher.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package docker
import (
"fmt"
"net/http"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-connections/tlsconfig"
"golang.org/x/net/context"
"github.com/elastic/beats/libbeat/common/bus"
"github.com/elastic/beats/libbeat/logp"
)
// shortIDLen is the number of characters in a short Docker container ID
const (
shortIDLen = 12
)
// Watcher reads docker events and keeps a list of known containers
type Watcher interface {
// Start watching docker API for new containers
Start() error
// Stop watching docker API for new containers
Stop()
// Container returns the running container with the given ID or nil if unknown
Container(ID string) *Container
// Containers returns the list of known containers
Containers() map[string]*Container
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
ListenStart() bus.Listener
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
ListenStop() bus.Listener
}
// TLSConfig for docker socket connection
type TLSConfig struct {
CA string `config:"certificate_authority"`
Certificate string `config:"certificate"`
Key string `config:"key"`
}
type watcher struct {
sync.RWMutex
client Client
ctx context.Context
stop context.CancelFunc
containers map[string]*Container
deleted map[string]time.Time // deleted container IDs -> last access time
cleanupTimeout time.Duration
lastValidTimestamp int64
stopped sync.WaitGroup
bus bus.Bus
shortID bool // whether to store short ID in "containers" too
}
// Container info retrieved by the watcher
type Container struct {
ID string
Name string
Image string
Labels map[string]string
IPAddresses []string
Ports []types.Port
}
// Client for docker interface
type Client interface {
ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}
// WatcherConstructor represents a function that creates a new Watcher from the given parameters
type WatcherConstructor func(host string, tls *TLSConfig, storeShortID bool) (Watcher, error)
// NewWatcher returns a watcher running for the given settings
func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) {
var httpClient *http.Client
if tls != nil {
options := tlsconfig.Options{
CAFile: tls.CA,
CertFile: tls.Certificate,
KeyFile: tls.Key,
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return nil, err
}
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsc,
},
}
}
client, err := NewClient(host, httpClient, nil)
if err != nil {
return nil, err
}
return NewWatcherWithClient(client, 60*time.Second, storeShortID)
}
// NewWatcherWithClient creates a new Watcher from a given Docker client
func NewWatcherWithClient(client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) {
ctx, cancel := context.WithCancel(context.Background())
return &watcher{
client: client,
ctx: ctx,
stop: cancel,
containers: make(map[string]*Container),
deleted: make(map[string]time.Time),
cleanupTimeout: cleanupTimeout,
bus: bus.New("docker"),
shortID: storeShortID,
}, nil
}
// Container returns the running container with the given ID or nil if unknown
func (w *watcher) Container(ID string) *Container {
w.RLock()
container := w.containers[ID]
if container == nil {
w.RUnlock()
return nil
}
_, ok := w.deleted[container.ID]
w.RUnlock()
// Update last access time if it's deleted
if ok {
w.Lock()
w.deleted[container.ID] = time.Now()
w.Unlock()
}
return container
}
// Containers returns the list of known containers
func (w *watcher) | () map[string]*Container {
w.RLock()
defer w.RUnlock()
res := make(map[string]*Container)
for k, v := range w.containers {
if !w.shortID || len(k) != shortIDLen {
res[k] = v
}
}
return res
}
// Start watching docker API for new containers
func (w *watcher) Start() error {
// Do initial scan of existing containers
logp.Debug("docker", "Start docker containers scanner")
w.lastValidTimestamp = time.Now().Unix()
w.Lock()
defer w.Unlock()
containers, err := w.listContainers(types.ContainerListOptions{})
if err != nil {
return err
}
for _, c := range containers {
w.containers[c.ID] = c
if w.shortID {
w.containers[c.ID[:shortIDLen]] = c
}
}
// Emit all start events (avoid blocking if the bus gets blocked)
go func() {
for _, c := range containers {
w.bus.Publish(bus.Event{
"start": true,
"container": c,
})
}
}()
w.stopped.Add(2)
go w.watch()
go w.cleanupWorker()
return nil
}
func (w *watcher) Stop() {
w.stop()
}
func (w *watcher) watch() {
filter := filters.NewArgs()
filter.Add("type", "container")
options := types.EventsOptions{
Since: fmt.Sprintf("%d", w.lastValidTimestamp),
Filters: filter,
}
for {
events, errors := w.client.Events(w.ctx, options)
WATCH:
for {
select {
case event := <-events:
logp.Debug("docker", "Got a new docker event: %v", event)
w.lastValidTimestamp = event.Time
// Add / update
if event.Action == "start" || event.Action == "update" {
filter := filters.NewArgs()
filter.Add("id", event.Actor.ID)
containers, err := w.listContainers(types.ContainerListOptions{
Filters: filter,
})
if err != nil || len(containers) != 1 {
logp.Err("Error getting container info: %v", err)
continue
}
container := containers[0]
w.Lock()
w.containers[event.Actor.ID] = container
if w.shortID {
w.containers[event.Actor.ID[:shortIDLen]] = container
}
// un-delete if it's flagged (in case of update or recreation)
delete(w.deleted, event.Actor.ID)
w.Unlock()
w.bus.Publish(bus.Event{
"start": true,
"container": container,
})
}
// Delete
if event.Action == "die" {
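// The container is only flagged as deleted here; cleanupWorker removes
// it for good after cleanupTimeout, so its metadata remains available
// for a while after it stops.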
container := w.Container(event.Actor.ID)
if container != nil {
w.bus.Publish(bus.Event{
"stop": true,
"container": container,
})
}
w.Lock()
w.deleted[event.Actor.ID] = time.Now()
w.Unlock()
}
case err := <-errors:
// Restart watch call
logp.Err("Error watching for docker events: %v", err)
time.Sleep(1 * time.Second)
break WATCH
case <-w.ctx.Done():
logp.Debug("docker", "Watcher stopped")
w.stopped.Done()
return
}
}
}
}
func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Container, error) {
containers, err := w.client.ContainerList(w.ctx, options)
if err != nil {
return nil, err
}
var result []*Container
for _, c := range containers {
var ipaddresses []string
for _, net := range c.NetworkSettings.Networks {
if net.IPAddress != "" {
ipaddresses = append(ipaddresses, net.IPAddress)
}
}
// If there are no network interfaces, assume that the container is on host network
// Inspect the container directly and use the hostname as the IP address
if len(ipaddresses) == 0 {
info, err := w.client.ContainerInspect(w.ctx, c.ID)
if err == nil {
ipaddresses = append(ipaddresses, info.Config.Hostname)
} else {
logp.Warn("unable to inspect container %s due to error %v", c.ID, err)
}
}
result = append(result, &Container{
ID: c.ID,
Name: c.Names[0][1:], // Strip '/' from container names
Image: c.Image,
Labels: c.Labels,
Ports: c.Ports,
IPAddresses: ipaddresses,
})
}
return result, nil
}
// Clean up deleted containers after they are not used anymore
func (w *watcher) cleanupWorker() {
for {
// Wait a full period
time.Sleep(w.cleanupTimeout)
select {
case <-w.ctx.Done():
w.stopped.Done()
return
default:
// Check entries for timeout
var toDelete []string
timeout := time.Now().Add(-w.cleanupTimeout)
w.RLock()
for key, lastSeen := range w.deleted {
if lastSeen.Before(timeout) {
logp.Debug("docker", "Removing container %s after cool down timeout", key)
toDelete = append(toDelete, key)
}
}
w.RUnlock()
// Delete timed out entries:
for _, key := range toDelete {
container := w.Container(key)
if container != nil {
w.bus.Publish(bus.Event{
"delete": true,
"container": container,
})
}
}
w.Lock()
for _, key := range toDelete {
delete(w.deleted, key)
delete(w.containers, key)
if w.shortID {
delete(w.containers, key[:shortIDLen])
}
}
w.Unlock()
}
}
}
// ListenStart returns a bus listener to receive container started events, with a `container` key holding it
func (w *watcher) ListenStart() bus.Listener {
return w.bus.Subscribe("start")
}
// ListenStop returns a bus listener to receive container stopped events, with a `container` key holding it
func (w *watcher) ListenStop() bus.Listener {
return w.bus.Subscribe("stop")
}
| Containers | identifier_name |
catsass.py | #!/usr/bin/env python
"""Seriously the cats ass. Seriously.
CatsAss is the cats ass for replacing multiple prints in
simple debugging situations.
----

----
*Requires Python 3.6*
"""
import os.path
from collections import namedtuple
from itertools import groupby
from pprint import pformat, pprint
__author__ = "Duroktar"
__license__ = "MIT"
# == API ==
# === the_cats_ass() ===
def the_cats_ass():
"""This function is the_cats_ass. That's an over-statement.
This is an under-statement. See what I did there?
What _you_ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
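# Illustrative usage sketch (not part of the original file; the function and
# variable names below are made up):
#
#   def handle_request(user, token, payload):
#       print(comb(the_cats_ass(), "user", "payload"))  # show only these two
#       print(avoid(the_cats_ass(), "token"))           # hide the token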
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
|
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The callers name usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat
self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset")
if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
rv.append("".join(line))
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or {}
# All this will be customizable in the next release.
title_colors = color_stuffs.get('title_colorz')
python3_lexer = color_stuffs.get('Python3Lexer')
terminal_formatter = color_stuffs.get('TerminalFormatter')
# Slap-chop! Why? I dunno..
cat = color_lines(cat, cat_colorz)
logo = color_lines(logo, logo_colorz)
title = color_lines(title, title_colors)
if highlight is not None:
data = highlight(data, python3_lexer(stripnl=False), terminal_formatter(bg=self.term_bg))
return cat, logo, title, data
# === Cat ===
class Cat:
"""
Different places to poke the cat. If the wrong scope is
being printed then you probably just need to poke the cat
somewhere else.
"""
TAIL = 0
ASS = 1
LEGS = 2
# === __cat_whisperer() ===
def __cat_whisperer(**kwargs):
"""
The cat whisperer is usually very solitary and private.
Thus any attempts at invoking the cat whisperer directly
will be met with no resistance, because this is Python,
and honestly he could use the friends.
returns: whisperings of cats
"""
from inspect import currentframe
frames = []
frame = currentframe()
while frame is not None:
frame = frame.f_back
try:
c_frame = frame.f_locals.copy()
co_name = frame.f_code.co_name
except AttributeError:
break
else:
frames.append(
PrettyKitty(co_name, {k: v for k, v in c_frame.items()
if not any([k.startswith('_'), callable(v)])}, **kwargs))
return frames
| """
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors yards then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where] | identifier_body |
catsass.py | #!/usr/bin/env python
"""Seriously the cats ass. Seriously.
CatsAss is the cats ass for replacing multiple prints in
simple debugging situations.
----

----
*Requires Python 3.6*
"""
import os.path
from collections import namedtuple
from itertools import groupby
from pprint import pformat, pprint
__author__ = "Duroktar"
__license__ = "MIT"
# == API ==
# === the_cats_ass() ===
def the_cats_ass():
"""This function is the_cats_ass. That's an over-statement.
This is an under-statement. See what I did there?
What _you_ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
"""
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors yards then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The callers name usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat | if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
rv.append("".join(line))
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or {}
# All this will be customizable in the next release.
title_colors = color_stuffs.get('title_colorz')
python3_lexer = color_stuffs.get('Python3Lexer')
terminal_formatter = color_stuffs.get('TerminalFormatter')
# Slap-chop! Why? I dunno..
cat = color_lines(cat, cat_colorz)
logo = color_lines(logo, logo_colorz)
title = color_lines(title, title_colors)
if highlight is not None:
data = highlight(data, python3_lexer(stripnl=False), terminal_formatter(bg=self.term_bg))
return cat, logo, title, data
# === Cat ===
class Cat:
"""
Different places to poke the cat. If the wrong scope is
being printed then you probably just need to poke the cat
somewhere else.
"""
TAIL = 0
ASS = 1
LEGS = 2
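# Added note: these values are indices into the list of caller frames built
# by __cat_whisperer() below, i.e. they pick how far up the call stack the
# printed variables are taken from.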
# === __cat_whisperer() ===
def __cat_whisperer(**kwargs):
"""
The cat whisperer is usually very solitary and private.
Thus any attempts at invoking the cat whisperer directly
will be met with no resistance, because this is Python,
and honestly he could use the friends.
returns: whisperings of cats
"""
from inspect import currentframe
frames = []
frame = currentframe()
while frame is not None:
frame = frame.f_back
try:
c_frame = frame.f_locals.copy()
co_name = frame.f_code.co_name
except AttributeError:
break
else:
frames.append(
PrettyKitty(co_name, {k: v for k, v in c_frame.items()
if not any([k.startswith('_'), callable(v)])}, **kwargs))
return frames | self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset") | random_line_split |
catsass.py | #!/usr/bin/env python
"""Seriously the cats ass. Seriously.
CatsAss is the cats ass for replacing multiple prints in
simple debugging situations.
----

----
*Requires Python 3.6*
"""
import os.path
from collections import namedtuple
from itertools import groupby
from pprint import pformat, pprint
__author__ = "Duroktar"
__license__ = "MIT"
# == API ==
# === the_cats_ass() ===
def the_cats_ass():
"""This function is the_cats_ass. That's an over-statement.
This is an under-statement. See what I did there?
What _you_ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
"""
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors yards then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class | (InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
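# --- Illustrative usage sketch (editor addition, not part of the original catsass.py). ---
# Shows how the_cats_ass(), comb() and avoid() above are meant to be combined; the local
# variable names are invented, and printing assumes the bundled 'octocat'/'logo' art files.
def _example_debug_session():
    breakfast = "tuna"
    nap_spots = ["sunbeam", "keyboard", "laptop"]
    mood = "judgemental"
    cat = the_cats_ass()                   # PrettyKitty holding this function's locals
    print(comb(cat, "breakfast", "mood"))  # keep only 'breakfast' and 'mood'
    print(avoid(cat, "nap_spots"))         # everything except 'nap_spots'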
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The caller's name, usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat
self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset")
if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
rv.append("".join(line))
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or {}
# All this will be customizable in the next release.
title_colors = color_stuffs.get('title_colorz')
python3_lexer = color_stuffs.get('Python3Lexer')
terminal_formatter = color_stuffs.get('TerminalFormatter')
# Slap-chop! Why? I dunno..
cat = color_lines(cat, cat_colorz)
logo = color_lines(logo, logo_colorz)
title = color_lines(title, title_colors)
if highlight is not None:
data = highlight(data, python3_lexer(stripnl=False), terminal_formatter(bg=self.term_bg))
return cat, logo, title, data
# === Cat ===
class Cat:
"""
Different places to poke the cat. If the wrong scope is
being printed then you probably just need to poke the cat
somewhere else.
"""
TAIL = 0
ASS = 1
LEGS = 2
# === __cat_whisperer() ===
def __cat_whisperer(**kwargs):
"""
The cat whisperer is usually very solitary and private.
Thus any attempts at invoking the cat whisperer directly
will be met with no resistance, because this is Python,
and honestly he could use the friends.
returns: whisperings of cats
"""
from inspect import currentframe
frames = []
frame = currentframe()
while frame is not None:
frame = frame.f_back
try:
c_frame = frame.f_locals.copy()
co_name = frame.f_code.co_name
except AttributeError:
break
else:
frames.append(
PrettyKitty(co_name, {k: v for k, v in c_frame.items()
if not any([k.startswith('_'), callable(v)])}, **kwargs))
return frames
| BadCat | identifier_name |
catsass.py | #!/usr/bin/env python
"""Seriously the cats ass. Seriously.
CatsAss is the cats ass for replacing multiple prints in
simple debugging situations.
----

----
*Requires Python 3.6*
"""
import os.path
from collections import namedtuple
from itertools import groupby
from pprint import pformat, pprint
__author__ = "Duroktar"
__license__ = "MIT"
# == API ==
# === the_cats_ass() ===
def the_cats_ass():
"""This function is the_cats_ass. That's an over-statement.
This is an under-statement. See what I did there?
What _you_ can do here is save the sys.ENVIRONMENT
by reducing print-ed waste. Mew.
returns: probably what you want
"""
return __cat_whisperer()[Cat.ASS]
# === comb() ===
def comb(cat, *brush):
"""
Filter the results of poking the cat. Takes variable
names as strings for the args.
cat:
the_cats_ass() or similar
brush:
the variables you wish to see (as strings)
returns: hairballs
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k in brush})
# === avoid() ===
def avoid(cat, *undesirables):
"""
Omit any undesirable variables from the result.
cat:
the_cats_ass() or similar
undesirables:
variables you wish to have omitted (as strings)
returns: from whence it came
"""
return PrettyKitty(cat.ctx, {k: v for k, v in cat.values.items()
if k not in undesirables})
# === poke_the_cat() ===
def poke_the_cat(where, catnip=False):
"""
You really shouldn't be poking cats. But if you insist,
it is recommended to bring catnip as it's not unusual for
cats to attack dicks who poke them.
where:
I leave this as an exercise for the reader. But
a word of wisdom from my 1st grade teacher: never do
anything that you wouldn't want to be caught dead doing.
Sadly he went to jail not long after whispering those
words in my ear.
catnip:
catnip can grow in the wild in many places around
the world. If no catnip can readily be found in yours
or any of your neighbors yards then just pass True as the
argument.
returns: possibly what you want.
"""
if not catnip:
from random import randint
class BadCat(InterruptedError):
pass
if randint(1, 10) == 7:
mew = "You attempt to poke the cat but it attacks. " \
"Maybe if you gave it some catnip?"
raise BadCat(mew)
return __cat_whisperer()[where]
# === schrodingers_cat() ===
def schrodingers_cat(peek=False):
"""
Peek in the box for a 50/50 shot of retrieving your
desired output, while the other half of the time the
cat is dead and the function returns nothing at all.
If you decide not to peek, the cat -being neither
dead nor alive- responds with random nonsense.
peek:
whether to peek in the box
returns: depends
"""
from random import choice, randint
if peek:
if randint(1, 10) % 2 == 0:
# RIP
return "Nothing at all"
else:
return poke_the_cat(Cat.LEGS, catnip=True)
else:
garbled_cries = "mew meow wokka beocat ekkie".split()
return choice(garbled_cries)
def calico_kitty():
"""I can haz colorz?"""
return __cat_whisperer(colors=True, coat='calico_colorz', logo_colorz='logo_colorz')[Cat.ASS]
def tuxedo_cat():
"""Finer than a pheasant"""
return __cat_whisperer(colors=True, coat='tuxedo_colorz', logo_colorz='dark_logo_colorz')[Cat.ASS]
__LIBDIR__ = os.path.abspath(os.path.dirname(__file__))
# === PrettyKitty() ===
class PrettyKitty:
"""I can has repr?"""
def __init__(self, ctx, values, cat=None, logo=None,
marker='|/', logo_offset=6, template=None,
formatter=pformat, colors=False, coat=None,
logo_colorz=None, term_bg="dark", title=None,
title_start=(6, 45)):
# The caller's name, usually.
self.ctx = ctx
# The local variables data.
self.values = values
# The line where the talk bubble should
# start must end with the marker, or the
# data will be printed just below the logo,
# which may not be what you want.
self.marker = marker
# Other formatters can be swapped in, JSON
# -for example- should work, or yaml.
self.formatter = formatter
# Allows the logo to be offset to either side;
# positive numbers move it left and negative
# numbers move it right.
self.logo_offset = logo_offset
self.term_bg = term_bg
if title is None:
title = "Meowed with love by Duroktar, 2017"
self.title = title
self.title_location = title_start
# Colors ~*%~
self.colors = colors
self.coat = coat
self.logo_colorz = logo_colorz
# TODO Should be public
Template = namedtuple("Template", "view offset")
if template is None:
# Option 1
template = Template({
"Name": self.ctx,
"Vars": self.values}, 1)
# Option 2
template = Template({self.ctx: self.values}, 1)
self.template = template
if cat is None:
cat = open(os.path.join(__LIBDIR__, 'octocat'), 'r').readlines()
self.cat = cat
if logo is None:
logo = open(os.path.join(__LIBDIR__, 'logo'), 'r').readlines()
self.logo = logo
def __repr__(self):
return self.haz_format() + "\n\n"
def haz_format(self):
from shutil import get_terminal_size
def yield_pads(lines, l):
for line in lines:
line = line.rstrip("\n").rstrip()
length = len(line)
if length < l:
yield line + " " * (l - length - 1)
else:
yield line
def rfill_lines(filler, start=0, offset=0, column=None):
height = len(filler)
for i in range(height):
index = start + i
try:
line = cat[index]
except IndexError:
cat.append(f"{' ' * pivot}{filler[i]}")
else:
if column is not None:
new_l = f"{line[:-(term_width - column) - offset]}{filler[i]}"
else:
new_l = f"{line[:-(term_width - pivot) - offset]}{filler[i]}"
cat[index] = new_l
term_width, term_height = get_terminal_size((80, 30))
cat = list(yield_pads(self.cat, term_width))
pivot = max((len(l.encode('unicode-escape')) for l in self.cat))
logo_offset = self.logo_offset
logo_width = max((len(str(l)) for l in self.logo))
logo = list(yield_pads(self.logo, logo_width - 1))
logo_height = len(logo)
marker = self.marker
data_start_line = [i - 1 for i, v in enumerate(cat) if v.strip().endswith(marker)]
if not data_start_line:
data_start_line = [logo_height + 1]
data_start_line = data_start_line[0]
if logo_height > data_start_line:
data_start_line = logo_height + 1
title = [self.title]
title_start_line, title_start_column = self.title_location
data = self.formatter(self.template.view, width=(term_width - pivot))
if self.colors:
cat, logo, title, data = self.haz_colorz(cat, logo, title, data)
rfill_lines(logo, offset=logo_offset)
rfill_lines(title, start=title_start_line, column=title_start_column)
rfill_lines(data.splitlines(), start=data_start_line, offset=self.template.offset)
return "\n".join((l.rstrip() for l in cat))
def haz_colorz(self, cat, logo, title, data):
from catsass.colorz import kitty_colorz
color_stuffs = kitty_colorz()
if color_stuffs is None:
return cat, logo, title, data
def color_lines(lines, color_mapping, words=False):
if any([len(k) > 1 for k in color_mapping.keys()]):
words = True
search_lines = [groupby(line.split()) for line in lines]
else:
search_lines = [groupby(line) for line in lines]
rv = []
for groups in search_lines:
line = []
for item, group in groups:
color = color_mapping.get(item)
if color is None:
line.append("".join(group))
else:
line.append(color("".join(group)).color_str)
if words:
rv.append(" ".join(line))
else:
|
return rv
highlight = color_stuffs.get('highlight')
# Customz
cat_colorz = color_stuffs.get(self.coat) or {}
logo_colorz = color_stuffs.get(self.logo_colorz) or {}
# All this will be customizable in the next release.
title_colors = color_stuffs.get('title_colorz')
python3_lexer = color_stuffs.get('Python3Lexer')
terminal_formatter = color_stuffs.get('TerminalFormatter')
# Slap-chop! Why? I dunno..
cat = color_lines(cat, cat_colorz)
logo = color_lines(logo, logo_colorz)
title = color_lines(title, title_colors)
if highlight is not None:
data = highlight(data, python3_lexer(stripnl=False), terminal_formatter(bg=self.term_bg))
return cat, logo, title, data
# === Cat ===
class Cat:
"""
Different places to poke the cat. If the wrong scope is
being printed then you probably just need to poke the cat
somewhere else.
"""
TAIL = 0
ASS = 1
LEGS = 2
# === __cat_whisperer() ===
def __cat_whisperer(**kwargs):
"""
The cat whisperer is usually very solitary and private.
Thus any attempts at invoking the cat whisperer directly
will be met with no resistance, because this is Python,
and honestly he could use the friends.
returns: whisperings of cats
"""
from inspect import currentframe
frames = []
frame = currentframe()
while frame is not None:
frame = frame.f_back
try:
c_frame = frame.f_locals.copy()
co_name = frame.f_code.co_name
except AttributeError:
break
else:
frames.append(
PrettyKitty(co_name, {k: v for k, v in c_frame.items()
if not any([k.startswith('_'), callable(v)])}, **kwargs))
return frames
| rv.append("".join(line)) | conditional_block |
Modules.py | import requests
import re
from bs4 import BeautifulSoup
import json
import psycopg2
from datetime import datetime
import subprocess
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from shutil import copyfileobj
import time
import xml.etree.cElementTree as etree
import ResolveRouter
class Transcribe:
|
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- a list of all the occurrences of realTimeFactor\n
index 1 -- a list of all the occurrences of transcriptions\n
index 2 -- a list of all the occurrences of the transcription ID\n
index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
returns true if successful. False if an error was thrown or the number of running
processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def numRunningProcesses():
"""
gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
class DatabaseInteract:
"""
This is where the database is updated. Refer to the example clips/header for format information.\n\n
Seeding the database would include the usage of 'uploadPodcast' then 'insertClip'. Pretty much every
function in here will require a dbConnection argument
"""
def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):
"""
HomePage --> the homepage of the podcast (NOT NULL)\n
Name --> The name of the podcast (NOT NULL)\n
Description --> a short description of the podcast\n
Category --> The category of the podcast\n
Source --> The service of which the podcast is being accessed through\n
ImageURI --> Podcast cover art\n
Web --> The website of the podcaster\n
Twitter --> The twitter account of the podcaster\n
Facebook --> the facebook account of the podcaster\n
LastUpdated --> the date that this was last updated.\n
RSS --> The URL of the podcasts RSS feed\n
If you don't have values for a certain field just pass it in as an empty string
"""
try:
cursor = dbConnection.cursor()
name = name.replace("'", "''")
description = description.replace("'", "''")
cursor.execute("""INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (homepage, name, description, category, source, imageurl, web, twitter, facebook, rss))
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("insertHeader", "e")
return False
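# --- Illustrative sketch (editor addition): seeding one podcast row. The connection
# parameters and feed values below are invented for the example; only the column
# meanings come from the docstring above.
def _example_seed_podcast():
    conn = psycopg2.connect(dbname="podcasts", user="postgres",
                            password="secret", host="localhost")
    DatabaseInteract.uploadPodcast(
        conn,
        homepage="https://example.com/show",
        name="Example Show",
        description="A show about examples.",
        category="Technology",
        source="example.com",
        imageurl="https://example.com/cover.jpg",
        web="https://example.com",
        twitter="",
        facebook="",
        rss="https://example.com/feed.xml")
    conn.close()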
def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):
"""
audiourl --> url of the transcription's mp3 is stored here (NOT NULL)\n
PodcastName --> The name of the show (references podcast(name))\n
Description --> The provided summary of that day's podcast\n
Date --> The date that podcast aired (parsed to mm-dd-yyyy)\n
Title --> The title of that specific podcast\n
Duration --> the running time of that podcast (use strptime to parse; needs mm-dd-yyyy)\n
pending --> right now will be false because we're not transcribing\n
(dateTranscribed) --> date of transcription (updated later)\n
"""
try:
cursor = dbConnection.cursor()
title = title.replace("'", "''")
cursor.execute("INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('" + audiourl + "', NULL, '" + podcastName + "', NULL, '" + description + "', '" + parsedDate + "', '" + title + "', FALSE, NULL);")
dbConnection.commit()
cursor.close()
return True
except:
return False
return False
def insertTranscription(dbConnection, realtimefactor, transcription, duration, dbID):
"""
This basically uploads the arguments to the database, returning False and throwing an
error if unsuccessful (or True otherwise)\n
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET realtimefactor = '" + realtimefactor + "', transcription = '" + transcription + "', datetranscribed = now(), duration = '" + duration + "' WHERE id = '" + str(dbID) + "';")
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("uploadTranscriptionData", e)
return False
def checkPre(dbConnection):
"""
checks the database for empty transcription entries, returns a list with \n\n
index 0 -- audiourl\n
index 1 -- id\n
index 2 -- podcast name\n
index 3 -- service of podcast
"""
cursor = dbConnection.cursor()
cursor.execute("SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;")
entry = cursor.fetchone()
cursor.close()
return entry
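# --- Illustrative sketch (editor addition): unpacking checkPre()'s result using the
# index layout documented above. Assumes an open psycopg2 connection named conn.
def _example_next_pending(conn):
    entry = DatabaseInteract.checkPre(conn)
    if entry is not None:
        audiourl, clip_id, podcast_name, source = entry
        print("next pending:", clip_id, podcast_name, source, audiourl)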
def refreshDatabase(dbConnection):
"""
This is to be used when both the podcasts folder and transcripts folder are empty.\n
For every entry in the database that has an empty transcript and a pending flag set to true, change
the pending flag to false.
Honestly this is used to deal with a weird bug and should be run every now and then
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = FALSE WHERE COALESCE(transcription, '') = '';")
dbConnection.commit()
cursor.close()
except Exception as e:
Tools.writeException("refreshDatabase", e)
def checkIfExists(dbconnection, title):
"""
given title, if the podcast is in the database already return true. False if
the podcast does not exist in the database
"""
cursor = dbconnection.cursor()
output = ""
title = title.replace("'", "''")
try:
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
except:
dbconnection.rollback()
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
def podcastInitRSS(conn, url):
"""
Gets the following podcast details:
name, homepage, description, category, source, web, twitter, facebook, rss \n
If all of the required fields are present, it uploads the podcast to the database
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
resArray = []
homepage = root[0].find("link").text
name = root[0].find("title").text
description = ""
try:
description = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}summary").text
except:
pass
try:
description = root[0].find("description").text
except:
pass
category = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}category").attrib["text"]
image = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}image").attrib["href"]
if(len(name) > 0 and len(description) > 0 and len(category) > 0 and len(image) > 0 and len(homepage) > 0):
DatabaseInteract.uploadPodcast(conn, homepage, name, description, category, "", image, "", "", "", url)
except Exception as e:
Tools.writeException("podcastInitRSS", e + ".\n issue with url " + url)
def rssCheck(podcastName, source, url):
"""
Checks the rss urls in the database and returns an array of each of the important fields
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
rssArray = []
for element in root[0].iter('item'):
try:
title = element.find("title").text.replace("''", "'")
description = element.find("description").text.replace("<strong>", "").replace("</strong>", "").replace("&", "and").replace("'","''")
date = element.find("pubDate").text
date = date.split(" ")
date = datetime.strptime(date[1] + date[2] + date[3], "%d%b%Y")
dateString = str(date.month) + "-" + str(date.day) + "-" + str(date.year)
url = ResolveRouter.urlRouter(podcastName, source, element)
except:
print("error in XMLDetailsDebug parsing issue")
if(len(title) > 0 and len(description) > 0 and len(dateString) > 0 and len(url) > 0):
rssArray.append([title, dateString, url, description])
else:
print("error in XMLDetailsDebug parsing issue")
return rssArray
except Exception as e:
print(e)
Tools.writeException("getXMLDetailsDebug", e)
| def runAutoCheck(dbConnection, maxConcurrent):
"""
runs an automatic check to see if any transcriptions need to be started or are already finished
and need to be re-uploaded\n\n
Needs dbConnection & an integer representing the max concurrent transcriptions that can be run at a time\n\n
Don't parse and upload files from the 'transcripts' folder while this is running,
because you can't tell which of those files are still in progress. (To be cleaned up later.)
"""
# checks if any shows are pending.
fileContent = DatabaseInteract.checkPre(dbConnection)
if(fileContent is not None and Tools.numRunningProcesses() < maxConcurrent):
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done
def updateScript(dbconnection):
"""
scans all rss feeds for new episodes and inserts any that aren't already in the database
"""
cursor = dbconnection.cursor()
cursor.execute("select rss, name, source from podcasts;")
rssArray = cursor.fetchall()
for rss in rssArray:
print("chekcing name " + str(rss[1]))
url = str(rss[0])
name = str(rss[1])
source = str(rss[2])
episodes = DatabaseInteract.rssCheck(name, source, url)  # don't clobber the list being iterated
for item in episodes:
if(DatabaseInteract.checkIfExists(dbconnection, item[0]) == False):
DatabaseInteract.insertClip(dbconnection, item[2], name, item[3], item[1], item[0])
def resetScript(dbConnection, maxConcurrent):
"""
Waits for the running transcription processes to end (2 min intervals). \n
Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the
databases
"""
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection)
def parseUpload(dbconnection, fileName):
"""
Requires dbconnection and the filename (location) of the file being parsed
"""
nhContent = ParseText.nohupTranscriptionContent(fileName)
count = 0
while count < len(nhContent[0]):
try:
rtf = nhContent[0][count]
transcription = nhContent[1][count].replace("'", "''").replace("_", "")
dbID = nhContent[2][count].replace(".", "")
duration = nhContent[3][count]
DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1 | identifier_body |
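# --- Illustrative sketch (editor addition): one way to wire the Transcribe helpers above
# into a polling driver. The connection parameters, nohup path and sleep interval are
# assumptions, not part of the original Modules.py; a real driver would also rotate
# nohup.out so finished transcripts aren't re-parsed on every pass.
def _example_driver(max_concurrent=2):
    conn = psycopg2.connect(dbname="podcasts", user="postgres",
                            password="secret", host="localhost")
    Transcribe.updateScript(conn)                      # pull new episodes from the RSS feeds
    while True:
        Transcribe.runAutoCheck(conn, max_concurrent)  # start transcriptions if slots are free
        Transcribe.parseUpload(conn, "nohup.out")      # push finished transcripts to the DB
        time.sleep(300)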
Modules.py | import requests
import re
from bs4 import BeautifulSoup
import json
import psycopg2
from datetime import datetime
import subprocess
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from shutil import copyfileobj
import time
import xml.etree.cElementTree as etree
import ResolveRouter
class Transcribe:
def runAutoCheck(dbConnection, maxConcurrent):
"""
runs an automatic check to see if any transcriptions need to be started or are already finished
and need to be re-uploaded\n\n
Needs dbConnection & an integer representing the max concurrent transcriptions that can be run at a time\n\n
Don't parse and upload files from the 'transcripts' folder while this is running,
because you can't tell which of those files are still in progress. (To be cleaned up later.)
"""
# checks if any shows are pending.
fileContent = DatabaseInteract.checkPre(dbConnection)
if(fileContent is not None and Tools.numRunningProcesses() < maxConcurrent):
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done
def updateScript(dbconnection):
"""
scans all rss feeds for new episodes and inserts any that aren't already in the database
"""
cursor = dbconnection.cursor()
cursor.execute("select rss, name, source from podcasts;")
rssArray = cursor.fetchall()
for rss in rssArray:
print("chekcing name " + str(rss[1]))
url = str(rss[0])
name = str(rss[1])
source = str(rss[2])
episodes = DatabaseInteract.rssCheck(name, source, url)  # don't clobber the list being iterated
for item in episodes:
if(DatabaseInteract.checkIfExists(dbconnection, item[0]) == False):
DatabaseInteract.insertClip(dbconnection, item[2], name, item[3], item[1], item[0])
def resetScript(dbConnection, maxConcurrent):
"""
Waits for the running transcription processes to end (2 min intervals). \n
Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the
databases
"""
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection)
def parseUpload(dbconnection, fileName):
"""
Requires dbconnection and the filename (location) of the file being parsed
"""
nhContent = ParseText.nohupTranscriptionContent(fileName)
count = 0
while count < len(nhContent[0]):
try:
rtf = nhContent[0][count]
transcription = nhContent[1][count].replace("'", "''").replace("_", "")
dbID = nhContent[2][count].replace(".", "")
duration = nhContent[3][count]
DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- a list of all the occurrences of realTimeFactor\n
index 1 -- a list of all the occurrences of transcriptions\n
index 2 -- a list of all the occurrences of the transcription ID\n
index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
returns true if successful. False if an error was thrown or the number of running
processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def | ():
"""
gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
class DatabaseInteract:
"""
This is where the database is updated. Refer to the example clips/header for format information.\n\n
Seeding the database would include the usage of 'uploadPodcast' then 'insertClip'. Pretty much every
function in here will require a dbConnection argument
"""
def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):
"""
HomePage --> the homepage of the podcast (NOT NULL)\n
Name --> The name of the podcast (NOT NULL)\n
Description --> a short description of the podcast\n
Category --> The category of the podcast\n
Source --> The service of which the podcast is being accessed through\n
ImageURI --> Podcast cover art\n
Web --> The website of the podcaster\n
Twitter --> The twitter account of the podcaster\n
Facebook --> the facebook account of the podcaster\n
LastUpdated --> the date that this was last updated.\n
RSS --> The URL of the podcasts RSS feed\n
If you don't have values for a certain field just pass it in as an empty string
"""
try:
cursor = dbConnection.cursor()
name = name.replace("'", "''")
description = description.replace("'", "''")
cursor.execute("""INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (homepage, name, description, category, source, imageurl, web, twitter, facebook, rss))
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("insertHeader", "e")
return False
def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):
"""
audiourl --> url of the transcription's mp3 is stored here (NOT NULL)\n
PodcastName --> The name of the show (references podcast(name))\n
Description --> The provided summary of that day's podcast\n
Date --> The date that podcast aired (parsed to mm-dd-yyyy)\n
Title --> The title of that specific podcast\n
Duration --> the running time of that podcast (use strptime to parse; needs mm-dd-yyyy)\n
pending --> right now will be false because we're not transcribing\n
(dateTranscribed) --> date of transcription (updated later)\n
"""
try:
cursor = dbConnection.cursor()
title = title.replace("'", "''")
cursor.execute("INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('" + audiourl + "', NULL, '" + podcastName + "', NULL, '" + description + "', '" + parsedDate + "', '" + title + "', FALSE, NULL);")
dbConnection.commit()
cursor.close()
return True
except:
return False
return False
def insertTranscription(dbConnection, realtimefactor, transcription, duration, dbID):
"""
This basically uploads the arguments to the database, returning False and throwing an
error if unsuccessful (or True otherwise)\n
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET realtimefactor = '" + realtimefactor + "', transcription = '" + transcription + "', datetranscribed = now(), duration = '" + duration + "' WHERE id = '" + str(dbID) + "';")
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("uploadTranscriptionData", e)
return False
def checkPre(dbConnection):
"""
checks the database for empty transcription entries, returns a list with \n\n
index 0 -- audiourl\n
index 1 -- id\n
index 2 -- podcast name\n
index 3 -- service of podcast
"""
cursor = dbConnection.cursor()
cursor.execute("SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;")
entry = cursor.fetchone()
cursor.close()
return entry
def refreshDatabase(dbConnection):
"""
This is to be used when both the podcasts folder and transcripts folder are empty.\n
For every entry in the database that has an empty transcript and a pending flag set to true, change
the pending flag to false.
Honestly this is used to deal with a weird bug and should be run every now and then
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = FALSE WHERE COALESCE(transcription, '') = '';")
dbConnection.commit()
cursor.close()
except Exception as e:
Tools.writeException("refreshDatabase", e)
def checkIfExists(dbconnection, title):
"""
given title, if the podcast is in the database already return true. False if
the podcast does not exist in the database
"""
cursor = dbconnection.cursor()
output = ""
title = title.replace("'", "''")
try:
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
except:
dbconnection.rollback()
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
def podcastInitRSS(conn, url):
"""
Gets the following podcast details:
name, homepage, description, category, source, web, twitter, facebook, rss \n
If all of the required fields are present, it uploads the podcast to the database
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
resArray = []
homepage = root[0].find("link").text
name = root[0].find("title").text
description = ""
try:
description = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}summary").text
except:
pass
try:
description = root[0].find("description").text
except:
pass
category = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}category").attrib["text"]
image = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}image").attrib["href"]
if(len(name) > 0 and len(description) > 0 and len(category) > 0 and len(image) > 0 and len(homepage) > 0):
DatabaseInteract.uploadPodcast(conn, homepage, name, description, category, "", image, "", "", "", url)
except Exception as e:
Tools.writeException("podcastInitRSS", e + ".\n issue with url " + url)
def rssCheck(podcastName, source, url):
"""
Checks the rss urls in the database and returns an array of each of the important fields
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
rssArray = []
for element in root[0].iter('item'):
try:
title = element.find("title").text.replace("''", "'")
description = element.find("description").text.replace("<strong>", "").replace("</strong>", "").replace("&", "and").replace("'","''")
date = element.find("pubDate").text
date = date.split(" ")
date = datetime.strptime(date[1] + date[2] + date[3], "%d%b%Y")
dateString = str(date.month) + "-" + str(date.day) + "-" + str(date.year)
url = ResolveRouter.urlRouter(podcastName, source, element)
except:
print("error in XMLDetailsDebug parsing issue")
if(len(title) > 0 and len(description) > 0 and len(dateString) > 0 and len(url) > 0):
rssArray.append([title, dateString, url, description])
else:
print("error in XMLDetailsDebug parsing issue")
return rssArray
except Exception as e:
print(e)
Tools.writeException("getXMLDetailsDebug", e)
| numRunningProcesses | identifier_name |
Modules.py | import requests
import re
from bs4 import BeautifulSoup
import json
import psycopg2
from datetime import datetime
import subprocess
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from shutil import copyfileobj
import time
import xml.etree.cElementTree as etree
import ResolveRouter
class Transcribe:
def runAutoCheck(dbConnection, maxConcurrent):
"""
runs an automatic check to see if any transcriptions need to be started or are already finished
and need to be re-uploaded\n\n
Needs dbConnection & an integer representing the max concurrent transcriptions that can be run at a time\n\n
Don't parse and upload files from the 'transcripts' folder while this is running,
because you can't tell which of those files are still in progress. (To be cleaned up later.)
"""
# checks if any shows are pending.
fileContent = DatabaseInteract.checkPre(dbConnection)
if(fileContent is not None and Tools.numRunningProcesses() < maxConcurrent):
|
def updateScript(dbconnection):
"""
scans all rss feeds for new episodes and inserts any that aren't already in the database
"""
cursor = dbconnection.cursor()
cursor.execute("select rss, name, source from podcasts;")
rssArray = cursor.fetchall()
for rss in rssArray:
print("chekcing name " + str(rss[1]))
url = str(rss[0])
name = str(rss[1])
source = str(rss[2])
episodes = DatabaseInteract.rssCheck(name, source, url)  # don't clobber the list being iterated
for item in episodes:
if(DatabaseInteract.checkIfExists(dbconnection, item[0]) == False):
DatabaseInteract.insertClip(dbconnection, item[2], name, item[3], item[1], item[0])
def resetScript(dbConnection, maxConcurrent):
"""
Waits for the running transcription processes to end (2 min intervals). \n
Then deletes everything in the 'podcasts' folder, parses all transcripts, and updates the
databases
"""
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection)
def parseUpload(dbconnection, fileName):
"""
Requires dbconnection and the filename (location) of the file being parsed
"""
nhContent = ParseText.nohupTranscriptionContent(fileName)
count = 0
while count < len(nhContent[0]):
try:
rtf = nhContent[0][count]
transcription = nhContent[1][count].replace("'", "''").replace("_", "")
dbID = nhContent[2][count].replace(".", "")
duration = nhContent[3][count]
DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- a list of all the occurrences of realTimeFactor\n
index 1 -- a list of all the occurrences of transcriptions\n
index 2 -- a list of all the occurrences of the transcription ID\n
index 3 -- a list of all the occurrences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
returns true if successful. False if an error was thrown or the number of running
processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def numRunningProcesses():
"""
gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np == None):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes Exception given the string format of the class name and the 'e' in any
Exception as e premise
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
class DatabaseInteract:
"""
This is where the database is updated. Refer to the example clips/header for format information.\n\n
Seeding the database would include the usage of 'uploadPodcast' then 'insertClip'. Pretty much every
function in here will require a dbConnection argument
"""
def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):
"""
HomePage --> the homepage of the podcast (NOT NULL)\n
Name --> The name of the podcast (NOT NULL)\n
Description --> a short description of the podcast\n
Category --> The category of the podcast\n
Source --> The service of which the podcast is being accessed through\n
ImageURI --> Podcast cover art\n
Web --> The website of the podcaster\n
Twitter --> The twitter account of the podcaster\n
Facebook --> the facebook account of the podcaster\n
LastUpdated --> the date that this was last updated.\n
RSS --> The URL of the podcast's RSS feed\n
If you don't have values for a certain field just pass it in as an empty string
"""
try:
cursor = dbConnection.cursor()
name = name.replace("'", "''")
description = description.replace("'", "''")
cursor.execute("""INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (homepage, name, description, category, source, imageurl, web, twitter, facebook, rss))
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("insertHeader", "e")
return False
def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):
"""
audiourl --> url of the transcriptions mp3 is stored here (NOT NULL)\n
PodcastName --> The name of the show (references podcast(name))\n
Description --> The provided summary of that day's podcast\n
Date --> The date that podcast aired (parsed to mm-dd-yyyy)\n
Title --> The title of that specific podcast\n
Duration --> the running time of that podcast (use strptime to parse, need mm-dd-yyyy)\n
pending --> right now will be false because we're not transcribing\n
(dateTranscribed) --> date of transcription (updated later)\n
"""
try:
cursor = dbConnection.cursor()
title = title.replace("'", "''")
cursor.execute("INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('" + audiourl + "', NULL, '" + podcastName + "', NULL, '" + description + "', '" + parsedDate + "', '" + title + "', FALSE, NULL);")
dbConnection.commit()
cursor.close()
return True
except:
return False
return False
def insertTranscription(dbConnection, realtimefactor, transcription, duration, dbID):
"""
This basically uploads the arguments to the database, returning False and logging an
error if unsuccessful (or True otherwise)\n
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET realtimefactor = '" + realtimefactor + "', transcription = '" + transcription + "', datetranscribed = now(), duration = '" + duration + "' WHERE id = '" + str(dbID) + "';")
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("uploadTranscriptionData", e)
return False
def checkPre(dbConnection):
"""
checks the database for empty transcription entries, returns a list with \n\n
index 0 -- audiourl\n
index 1 -- id\n
index 2 -- podcast name\n
index 3 -- service of podcast
"""
cursor = dbConnection.cursor()
cursor.execute("SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;")
entry = cursor.fetchone()
cursor.close()
return entry
def refreshDatabase(dbConnection):
"""
This is to be used when both the podcasts folder and transcripts folder are empty.\n
For every entry in the database that has an empty transcript and a pending flag set to true, change
the pending flag to false.
Honestly this is used to deal with a weird bug and should be run every now and then
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = FALSE WHERE COALESCE(transcription, '') = '';")
dbConnection.commit()
cursor.close()
except Exception as e:
Tools.writeException("refreshDatabase", e)
def checkIfExists(dbconnection, title):
"""
given a title, return True if the podcast is already in the database, False if
it does not exist in the database
"""
cursor = dbconnection.cursor()
output = ""
title = title.replace("'", "''")
try:
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
except:
dbconnection.rollback()
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
def podcastInitRSS(conn, url):
"""
Gets the following podcast details:
name, homepage, description, category, source, web, twitter, facebook, rss \n
If all of these fields are found it uploads the podcast to the database
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
resArray = []
homepage = root[0].find("link").text
name = root[0].find("title").text
description = ""
try:
description = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}summary").text
except:
pass
try:
description = root[0].find("description").text
except:
pass
category = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}category").attrib["text"]
image = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}image").attrib["href"]
if(len(name) > 0 and len(description) > 0 and len(category) > 0 and len(image) > 0 and len(homepage) > 0):
DatabaseInteract.uploadPodcast(conn, homepage, name, description, category, "", image, "", "", "", url)
except Exception as e:
Tools.writeException("podcastInitRSS", e + ".\n issue with url " + url)
def rssCheck(podcastName, source, url):
"""
Checks the rss urls in the database and returns an array of each of the important fields
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
rssArray = []
for element in root[0].iter('item'):
try:
title = element.find("title").text.replace("''", "'")
description = element.find("description").text.replace("<strong>", "").replace("</strong>", "").replace("&", "and").replace("'","''")
date = element.find("pubDate").text
date = date.split(" ")
date = datetime.strptime(date[1] + date[2] + date[3], "%d%b%Y")
dateString = str(date.month) + "-" + str(date.day) + "-" + str(date.year)
url = ResolveRouter.urlRouter(podcastName, source, element)
except:
print("error in XMLDetailsDebug parsing issue")
if(len(title) > 0 and len(description) > 0 and len(dateString) > 0 and len(url) > 0):
rssArray.append([title, dateString, url, description])
else:
print("error in XMLDetailsDebug parsing issue")
return rssArray
except Exception as e:
print(e)
Tools.writeException("getXMLDetailsDebug", e)
| cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done | conditional_block |
Modules.py | import requests
import re
from bs4 import BeautifulSoup
import json
import psycopg2
from datetime import datetime
import subprocess
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from shutil import copyfileobj
import time
import xml.etree.cElementTree as etree
import ResolveRouter
class Transcribe:
def runAutoCheck(dbConnection, maxConcurrent):
"""
runs an automatic check to see if any transcriptions need to be started or are already finished
and need to be re-uploaded\n\n
Needs dbConnection & an integer representing the max concurrent transcriptions that can be run at a time\n\n
Do not use this function to parse and upload files from the 'transcripts' folder,
because you cannot tell which of those files are still in progress.
"""
# checks if any shows are pending.
fileContent = DatabaseInteract.checkPre(dbConnection)
if(fileContent is not None and Tools.numRunningProcesses() < maxConcurrent):
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = TRUE WHERE id = '" + str(fileContent[1]) + "';")
dbConnection.commit()
cursor.close()
url = fileContent[0]
indexID = str(fileContent[1]) # get the ID instead of the filename
service = str(fileContent[3])
# podcastName = fileContent[2]
Tools.transcribeAll(service, url, indexID) # download the mp3 will print when done
def updateScript(dbconnection):
"""
scans all RSS feeds for new episodes and inserts any that are not already in the database
"""
cursor = dbconnection.cursor()
cursor.execute("select rss, name, source from podcasts;")
rssArray = cursor.fetchall()
for rss in rssArray:
print("chekcing name " + str(rss[1]))
url = str(rss[0])
name = str(rss[1])
source = str(rss[2])
episodes = DatabaseInteract.rssCheck(name, source, url)
for item in episodes:
if(DatabaseInteract.checkIfExists(dbconnection, item[0]) == False):
DatabaseInteract.insertClip(dbconnection, item[2], name, item[3], item[1], item[0])
def resetScript(dbConnection, maxConcurrent):
"""
Waits for the running transcription processes to end (2 min intervals). \n
Then deletes everything in the 'podcasts' folder and resets any stale pending flags in the
database
"""
while (Tools.numRunningProcesses() != 0): # wait for the transcriptions to end. Pings every 2 mins
time.sleep(120)
emptyPodcastFolder = Tools.cleanupFolder("podcasts")
DatabaseInteract.refreshDatabase(dbConnection)
def parseUpload(dbconnection, fileName):
"""
Requires dbconnection and the filename (location) of the file being parsed
"""
nhContent = ParseText.nohupTranscriptionContent(fileName)
count = 0
while count < len(nhContent[0]):
try:
rtf = nhContent[0][count]
transcription = nhContent[1][count].replace("'", "''").replace("_", "")
dbID = nhContent[2][count].replace(".", "")
duration = nhContent[3][count]
DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)
count += 1
except:
print("couldnt upload one at index " + str(count))
count += 1
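# --- Hedged usage sketch (not part of the original module) ---
# Shows how the Transcribe entry points above might be driven from a scheduled
# job. The psycopg2 connection parameters and the limit of 2 concurrent
# transcriptions are assumptions, not values taken from this project.
def _example_transcribe_cycle():
    conn = psycopg2.connect(dbname="podcasts", user="postgres", password="postgres", host="localhost")
    try:
        Transcribe.updateScript(conn)     # pull newly published episodes from every RSS feed
        Transcribe.runAutoCheck(conn, 2)  # start at most 2 transcriptions
    finally:
        conn.close()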
class ParseText:
"""
This class handles parsing of two entities:
\n\tText files containing one instance of a transcribed podcast or...
\n\tnohup files containing multiple instances of a transcribed podcast
"""
def nohupTranscriptionContent(filePath):
"""
This parses the content of nohup. The size of nohup is basically unlimited but
each line has to be under 900000 characters(?). This then returns the following...\n\n
index 0 -- a list of all the occurences of realTimeFactor\n
index 1 -- a list of all the occurences of transcriptions\n
index 2 -- a list of all the occurences of the transcription ID\n
index 3 -- a list of all the occurences of the total transcription time.\n\n
\n\n
\-\---Example usage----\n
parsedContent = nohupTranscriptionContent("ok.txt")
for i in range(len(parsedContent[0])):
print(parsedContent[0][i]) # realtimefactor
print(parsedContent[1][i]) # transcription
print(parsedContent[2][i]) # transcription ID
print(parsedContent[3][i]) # transcription time
"""
try:
continu = True
fileContent = ""
f = open(filePath, 'r')
while (continu):
temp = f.readline(900000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp | results = []
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id(.*?) (.*?)\n', fileContent)
transcriptionList = []
transcriptionIDList = []
for item in transcription:
if(len(item[1]) > 1000):
transcriptionIDList.append(item[0])
transcriptionList.append(item[1])
results.append(transcriptionList)
results.append(transcriptionIDList)
transcriptionTime = re.findall(r'seconds / (.*?) seconds\.', fileContent)
results.append(transcriptionTime)
return results
except Exception as e:
Tools.writeException("nohupTranscriptionContent", e)
return False
def fileTranscriptionContent(filePath):
"""
This parses the content of the transcription file. The size of the file can basically be unlimited
but each line has to be under 300000 characters(?). This then returns the following...\n\n
index 0 -- url\n
index 1 -- realTimeFactor\n
index 2 -- transcription\n
"""
try:
continu = True
f = open(filePath, 'r')
fileContent = ""
while (continu):
temp = f.readline(300000)
if(len(temp) == 0):
continu = False
else:
fileContent += temp
results = []
f.close()
url = re.findall(r'URL:(.*?)\n', fileContent)
results.append(url)
realTimeFactor = re.findall(r'Timing stats: real-time factor for offline decoding was (.*?) = ', fileContent)
results.append(realTimeFactor)
transcription = re.findall(r'utterance-id1 (.*?)\n', fileContent)
for item in transcription:
if(len(item) > 500):
results.append(item.replace("'", "''"))
if((len(results[0]) > 0) and (len(results[1]) > 0) and (len(results[2]) > 0)):
return results
else:
Tools.writeException("fileTranscriptionContent", "ERROR attempted to parse " + filePath + " but got " + str(results))
return False
except Exception as e:
Tools.writeException("fileTranscriptionContent", e)
class Tools:
"""
Random functions
"""
def cleanupFolder(folderName):
"""
deletes all contents of the specified folder (but not the folder itself).\n
returns True if successful, False if an error was thrown or the number of running
processes is not 0
"""
try:
if(Tools.numRunningProcesses() == 0):
process = subprocess.call('rm -r ./' + folderName + '/*', shell=True)
return True
else:
return False
except Exception as e:
Tools.writeException("cleanupFolder", e)
return False
def numRunningProcesses():
"""
gets the number of running transcription processes
"""
try:
proc = subprocess.run("ps -Af|grep -i \"online2-wav-nnet3-latgen-faster\"", stdout=subprocess.PIPE, shell=True)
np = (len(str(proc.stdout).split("\\n")) - 3)
if(np < 0):
np = 0
return np
except Exception as e:
Tools.writeException("numRunningProcesses", e)
return -1
def writeException(className, exceptionString):
"""
Writes an exception to error.log, given the class name and the exception object from an
'except Exception as e' block
"""
errorFile = open("error.log", 'a')
errorFile.write("ERROR occured in " + className + " at " + str(datetime.now()) + " with the following message\n" + str(exceptionString) + "\n\n")
errorFile.close()
def getFirstFile(folderName):
"""
Returns the filename of the first file in the given directory. Just provide the directory's name
with no leading './'
"""
listFiles = subprocess.run("ls ./" + folderName, shell=True, stdout=subprocess.PIPE)
fileName = re.search(r"b'(.*?)\\n", str(listFiles.stdout))[1]
if(len(fileName) > 0):
return fileName
else:
return False
def transcribeAll(service, url, fileName):
"""
Does everything you need to transcribe a podcast given the filename\n
Download podcast, wait 40 seconds, change podcast to .wav, wait 10 seconds,
remove the .mp3 file, run the transcription
"""
if(service == "omny.fm"):
url = url.replace(".mp3","") + ".mp3"
subprocess.Popen("wget -c -O ./podcasts/" + fileName + ".mp3 " + url + " && sleep 40 && ffmpeg -i ./podcasts/"
+ fileName + ".mp3 -acodec pcm_s16le -ac 1 -ar 8000 ./podcasts/" + fileName + ".wav && sleep 10 && rm ./podcasts/"
+ fileName + ".mp3 && nohup ./online2-wav-nnet3-latgen-faster --online=false --do-endpointing=false "
+ "--frame-subsampling-factor=3 --config=online.conf --max-mem=2000000000 --max-active=7000 --beam=15.0 --lattice-beam=6.0 "
+ "--acoustic-scale=1.0 --word-symbol-table=words.txt final.mdl HCLG.fst 'ark:echo utterance-id" + fileName
+ " utterance-id" + fileName + "|' 'scp:echo utterance-id" + fileName + " ./podcasts/" + fileName + ".wav|' 'ark:/dev/null' &", shell=True)
class DatabaseInteract:
"""
This is where the database is updated. Refer to the example clips/header for format information.\n\n
Seeding the database would include the usage of 'uploadPodcast' then 'insertClip'. Pretty much every
function in here will require a dbConnection argument
"""
def uploadPodcast(dbConnection, homepage, name, description, category, source, imageurl, web, twitter, facebook, rss):
"""
HomePage --> the homepage of the podcast (NOT NULL)\n
Name --> The name of the podcast (NOT NULL)\n
Description --> a short description of the podcast\n
Category --> The category of the podcast\n
Source --> The service of which the podcast is being accessed through\n
ImageURI --> Podcast cover art\n
Web --> The website of the podcaster\n
Twitter --> The twitter account of the podcaster\n
Facebook --> the facebook account of the podcaster\n
LastUpdated --> the date that this was last updated.\n
RSS --> The URL of the podcast's RSS feed\n
If you don't have values for a certain field just pass it in as an empty string
"""
try:
cursor = dbConnection.cursor()
name = name.replace("'", "''")
description = description.replace("'", "''")
cursor.execute("""INSERT INTO podcasts(homepage, name, description, category, source, imageuri, web, twitter, Facebook, rss) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", (homepage, name, description, category, source, imageurl, web, twitter, facebook, rss))
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("insertHeader", "e")
return False
def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):
"""
audiourl --> url of the transcriptions mp3 is stored here (NOT NULL)\n
PodcastName --> The name of the show (references podcast(name))\n
Description --> The provided summary of that day's podcast\n
Date --> The date that podcast aired (parsed to mm-dd-yyyy)\n
Title --> The title of that specific podcast\n
Duration --> the running time of that podcast (use strptime to parse, need mm-dd-yyyy)\n
pending --> right now will be false because we're not transcribing\n
(dateTranscribed) --> date of transcription (updated later)\n
"""
try:
cursor = dbConnection.cursor()
title = title.replace("'", "''")
cursor.execute("INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('" + audiourl + "', NULL, '" + podcastName + "', NULL, '" + description + "', '" + parsedDate + "', '" + title + "', FALSE, NULL);")
dbConnection.commit()
cursor.close()
return True
except:
return False
return False
def insertTranscription(dbConnection, realtimefactor, transcription, duration, dbID):
"""
This basically uploads the arguments to the database, returning False and logging an
error if unsuccessful (or True otherwise)\n
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET realtimefactor = '" + realtimefactor + "', transcription = '" + transcription + "', datetranscribed = now(), duration = '" + duration + "' WHERE id = '" + str(dbID) + "';")
dbConnection.commit()
cursor.close()
return True
except Exception as e:
Tools.writeException("uploadTranscriptionData", e)
return False
def checkPre(dbConnection):
"""
checks the database for empty transcription entries, returns a list with \n\n
index 0 -- audiourl\n
index 1 -- id\n
index 2 -- podcast name\n
index 3 -- service of podcast
"""
cursor = dbConnection.cursor()
cursor.execute("SELECT audiourl, T.id, podcastName, source FROM transcriptions AS T JOIN podcasts as P ON P.name = T.podcastname WHERE COALESCE(T.transcription, '') = '' AND pending = FALSE LIMIT 1;")
entry = cursor.fetchone()
cursor.close()
return entry
def refreshDatabase(dbConnection):
"""
This is to be used when both the podcasts folder and transcripts folder are empty.\n
For every entry in the database that has an empty transcript and a pending flag set to true, change
the pending flag to false.
Honestly this is used to deal with a weird bug and should be run every now and then
"""
try:
cursor = dbConnection.cursor()
cursor.execute("UPDATE transcriptions SET pending = FALSE WHERE COALESCE(transcription, '') = '';")
dbConnection.commit()
cursor.close()
except Exception as e:
Tools.writeException("refreshDatabase", e)
def checkIfExists(dbconnection, title):
"""
given a title, return True if the podcast is already in the database, False if
it does not exist in the database
"""
cursor = dbconnection.cursor()
output = ""
title = title.replace("'", "''")
try:
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
except:
dbconnection.rollback()
cursor.execute("SELECT * FROM transcriptions WHERE title = '" + title + "';")
dbconnection.commit()
output = cursor.fetchone()
cursor.close()
if(output is None):
return False
else:
return True
def podcastInitRSS(conn, url):
"""
Gets the following podcast details:
name, homepage, description, category, source, web, twitter, facebook, rss \n
If all of these fields are found it uploads the podcast to the database
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
resArray = []
homepage = root[0].find("link").text
name = root[0].find("title").text
description = ""
try:
description = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}summary").text
except:
pass
try:
description = root[0].find("description").text
except:
pass
category = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}category").attrib["text"]
image = root[0].find("{http://www.itunes.com/dtds/podcast-1.0.dtd}image").attrib["href"]
if(len(name) > 0 and len(description) > 0 and len(category) > 0 and len(image) > 0 and len(homepage) > 0):
DatabaseInteract.uploadPodcast(conn, homepage, name, description, category, "", image, "", "", "", url)
except Exception as e:
Tools.writeException("podcastInitRSS", e + ".\n issue with url " + url)
def rssCheck(podcastName, source, url):
"""
Checks the rss urls in the database and returns an array of each of the important fields
"""
try:
headers = {'Accept':'text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8' ,'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'}
req = requests.get(url, headers=headers)
root = etree.fromstring(req.text)
rssArray = []
for element in root[0].iter('item'):
try:
title = element.find("title").text.replace("''", "'")
description = element.find("description").text.replace("<strong>", "").replace("</strong>", "").replace("&", "and").replace("'","''")
date = element.find("pubDate").text
date = date.split(" ")
date = datetime.strptime(date[1] + date[2] + date[3], "%d%b%Y")
dateString = str(date.month) + "-" + str(date.day) + "-" + str(date.year)
url = ResolveRouter.urlRouter(podcastName, source, element)
except:
print("error in XMLDetailsDebug parsing issue")
if(len(title) > 0 and len(description) > 0 and len(dateString) > 0 and len(url) > 0):
rssArray.append([title, dateString, url, description])
else:
print("error in XMLDetailsDebug parsing issue")
return rssArray
except Exception as e:
print(e)
Tools.writeException("getXMLDetailsDebug", e) | random_line_split |
|
preprocess.py | import numpy as np
import pandas as pd
import os.path
import sys, traceback
import random
import re
import string
import pickle
import string
from nltk.probability import FreqDist
MAX_TOKENS = 512
MAX_WORDS = 400
def truncate(text):
"""Truncate the text."""
# TODO fix this to use a variable instead of 511
text = (text[:511]) if len(text) > MAX_TOKENS else text
return text
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def contractions(text):
contractions = {
"ain't": "are not ",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "i had",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
final_string += word
final_string += ' '
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
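# --- Hedged illustration (not part of the original script) ---
# The sample sentence is made up. contractions() lower-cases each word and
# expands anything found in the mapping above, so the call below prints
# roughly "i cannot believe it is working" (give or take trailing whitespace).
def _example_contractions():
    print(contractions("I can't believe it's working"))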
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
# expand contractions and lower-case the text
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
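# --- Hedged illustration (not part of the original script) ---
# Made-up input showing what cleanText is meant to do: contractions get
# expanded, HTML tags and entities are stripped, and leftover punctuation
# and whitespace collapse to single spaces.
def _example_clean_text():
    print(cleanText("<p>This isn't <b>great</b> & clean</p>"))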
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag): | return final_string
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit the max number of tokens, stop parsing the string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
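# --- Hedged helper sketch (not part of the original script) ---
# Ties the two steps above together the same way read_data does below: build
# the vocabulary once from all cleaned lines, then keep only vocabulary words
# (capped at MAX_WORDS) in each individual line.
def _example_vocabulary_filter(lines, stop_words):
    vocab = getAllWords(lines, stop_words)
    return [addWordsIn(line, vocab) for line in lines]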
def read_data(filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how",
"all","any","both","each","few","more","most","other","some","such",
"can", "will",
"just",
"don",
"don't",
"should",
"should've",
"now",
"d",
"ll",
"m",
"o",
"re",
"ve",
"y",
"ain",
"aren",
"aren't",
"couldn",
"couldn't",
"didn",
"didn't",
"doesn",
"doesn't",
"hadn",
"hadn't",
"hasn",
"hasn't",
"haven",
"haven't",
"isn",
"isn't",
"ma",
"mightn",
"mightn't",
"mustn",
"mustn't",
"needn",
"needn't",
"shan",
"shan't",
"shouldn"
"shouldn't",
"wasn",
"wasn't",
"weren",
"weren't",
"won",
"won't",
"wouldn",
"wouldn't"]
# pandas drop columns using list of column names
df = df.drop(['doc_id', 'date', 'title'], axis=1)
print('Cleaning text')
df["clean_text"] = df['text'].apply(cleanText)
print('Removing words in stop words')
df['clean_text'] = [removeWordsNotIn(line, stop_words) for line in df['clean_text']]
clean_text = df["clean_text"].tolist()
# print(clean_text[:10])
print('Getting all words')
all_words = getAllWords(clean_text, stop_words)
# print('adding words in all_words')
df['clean_text'] = [addWordsIn(line, all_words) for line in df['clean_text']]
# df.text = df.text.apply(lambda x: x.translate(None, string.punctuation))
# df.clean_text = df.clean_text.apply(lambda x: x.translate(string.digits))
# df["clean_text"] = df['text'].str.replace('[^\w\s]','')
print('Finished reading and cleaning data')
print('Number of rows in dataframe: ' + str(len(df.index)))
return df
def preprocess_file(filepath, output_path, flag):
df = read_data(filepath)
if(flag):
header = ["ID", "clean_text", "star_rating", 'human_tag']
else:
header = ["ID", "clean_text"]
print('Output: ' + output_path)
df.to_csv(output_path, columns = header, index=False)
def main():
"""Main function of the program."""
# Specify path
training_filepath = 'data/training.csv'
testing_filepath = 'data/public_test_features.csv'
# Check whether the specified path exists or not
isExist = os.path.exists(training_filepath)
if(isExist):
print('Reading from ' + training_filepath)
else:
print('Training file not found in the app path.')
exit()
preprocess_file(training_filepath, 'data/clean_training1.csv', True)
# Check whether the specified path exists or not
isExist = os.path.exists(testing_filepath)
if(isExist):
print('Reading from ' + testing_filepath)
else:
print('Testing file not found in the app path.')
exit()
preprocess_file(testing_filepath,'data/clean_testing1.csv', False)
if __name__ == "__main__":
main() | final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit() | random_line_split |
preprocess.py | import numpy as np
import pandas as pd
import os.path
import sys, traceback
import random
import re
import string
import pickle
import string
from nltk.probability import FreqDist
MAX_TOKENS = 512
MAX_WORDS = 400
def truncate(text):
"""Truncate the text."""
# TODO fix this to use a variable instead of 511
text = (text[:511]) if len(text) > MAX_TOKENS else text
return text
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def contractions(text):
contractions = {
"ain't": "are not ",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "i had",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
final_string += word
final_string += ' '
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
# expand contractions and lower-case the text
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit()
return final_string
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit the max number of tokens, stop parsing the string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def | (filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how",
"all","any","both","each","few","more","most","other","some","such",
"can", "will",
"just",
"don",
"don't",
"should",
"should've",
"now",
"d",
"ll",
"m",
"o",
"re",
"ve",
"y",
"ain",
"aren",
"aren't",
"couldn",
"couldn't",
"didn",
"didn't",
"doesn",
"doesn't",
"hadn",
"hadn't",
"hasn",
"hasn't",
"haven",
"haven't",
"isn",
"isn't",
"ma",
"mightn",
"mightn't",
"mustn",
"mustn't",
"needn",
"needn't",
"shan",
"shan't",
"shouldn"
"shouldn't",
"wasn",
"wasn't",
"weren",
"weren't",
"won",
"won't",
"wouldn",
"wouldn't"]
# pandas drop columns using list of column names
df = df.drop(['doc_id', 'date', 'title'], axis=1)
print('Cleaning text')
df["clean_text"] = df['text'].apply(cleanText)
print('Removing words in stop words')
df['clean_text'] = [removeWordsNotIn(line, stop_words) for line in df['clean_text']]
clean_text = df["clean_text"].tolist()
# print(clean_text[:10])
print('Getting all words')
all_words = getAllWords(clean_text, stop_words)
# print('adding words in all_words')
df['clean_text'] = [addWordsIn(line, all_words) for line in df['clean_text']]
# df.text = df.text.apply(lambda x: x.translate(None, string.punctuation))
# df.clean_text = df.clean_text.apply(lambda x: x.translate(string.digits))
# df["clean_text"] = df['text'].str.replace('[^\w\s]','')
print('Finished reading and cleaning data')
print('Number of rows in dataframe: ' + str(len(df.index)))
return df
def preprocess_file(filepath, output_path, flag):
df = read_data(filepath)
if(flag):
header = ["ID", "clean_text", "star_rating", 'human_tag']
else:
header = ["ID", "clean_text"]
print('Output: ' + output_path)
df.to_csv(output_path, columns = header, index=False)
def main():
"""Main function of the program."""
# Specify path
training_filepath = 'data/training.csv'
testing_filepath = 'data/public_test_features.csv'
# Check whether the specified path exists or not
isExist = os.path.exists(training_filepath)
if(isExist):
print('Reading from ' + training_filepath)
else:
print('Training file not found in the app path.')
exit()
preprocess_file(training_filepath, 'data/clean_training1.csv', True)
# Check whether the specified path exists or not
isExist = os.path.exists(testing_filepath)
if(isExist):
print('Reading from ' + testing_filepath)
else:
print('Testing file not found in the app path.')
exit()
preprocess_file(testing_filepath,'data/clean_testing1.csv', False)
if __name__ == "__main__":
main()
| read_data | identifier_name |
preprocess.py | import numpy as np
import pandas as pd
import os.path
import sys, traceback
import random
import re
import string
import pickle
import string
from nltk.probability import FreqDist
MAX_TOKENS = 512
MAX_WORDS = 400
def truncate(text):
"""Truncate the text."""
# TODO fix this to use a variable instead of 511
text = (text[:511]) if len(text) > MAX_TOKENS else text
return text
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def contractions(text):
contractions = {
"ain't": "are not ",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "i had",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
|
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
# expand contractions and lower-case the text
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit()
return final_string
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit the max number of tokens, stop parsing the string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def read_data(filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how",
"all","any","both","each","few","more","most","other","some","such",
"can", "will",
"just",
"don",
"don't",
"should",
"should've",
"now",
"d",
"ll",
"m",
"o",
"re",
"ve",
"y",
"ain",
"aren",
"aren't",
"couldn",
"couldn't",
"didn",
"didn't",
"doesn",
"doesn't",
"hadn",
"hadn't",
"hasn",
"hasn't",
"haven",
"haven't",
"isn",
"isn't",
"ma",
"mightn",
"mightn't",
"mustn",
"mustn't",
"needn",
"needn't",
"shan",
"shan't",
"shouldn"
"shouldn't",
"wasn",
"wasn't",
"weren",
"weren't",
"won",
"won't",
"wouldn",
"wouldn't"]
# pandas drop columns using list of column names
df = df.drop(['doc_id', 'date', 'title'], axis=1)
print('Cleaning text')
df["clean_text"] = df['text'].apply(cleanText)
print('Removing words in stop words')
df['clean_text'] = [removeWordsNotIn(line, stop_words) for line in df['clean_text']]
clean_text = df["clean_text"].tolist()
# print(clean_text[:10])
print('Getting all words')
all_words = getAllWords(clean_text, stop_words)
# print('adding words in all_words')
df['clean_text'] = [addWordsIn(line, all_words) for line in df['clean_text']]
# df.text = df.text.apply(lambda x: x.translate(None, string.punctuation))
# df.clean_text = df.clean_text.apply(lambda x: x.translate(string.digits))
# df["clean_text"] = df['text'].str.replace('[^\w\s]','')
print('Finished reading and cleaning data')
print('Number of rows in dataframe: ' + str(len(df.index)))
return df
def preprocess_file(filepath, output_path, flag):
df = read_data(filepath)
if(flag):
header = ["ID", "clean_text", "star_rating", 'human_tag']
else:
header = ["ID", "clean_text"]
print('Output: ' + output_path)
df.to_csv(output_path, columns = header, index=False)
def main():
"""Main function of the program."""
# Specify path
training_filepath = 'data/training.csv'
testing_filepath = 'data/public_test_features.csv'
# Check whether the specified path exists or not
isExist = os.path.exists(training_filepath)
if(isExist):
print('Reading from ' + training_filepath)
else:
print('Training file not found in the app path.')
exit()
preprocess_file(training_filepath, 'data/clean_training1.csv', True)
# Check whether the specified path exists or not
isExist = os.path.exists(testing_filepath)
if(isExist):
print('Reading from ' + testing_filepath)
else:
print('Testing file not found in the app path.')
exit()
preprocess_file(testing_filepath,'data/clean_testing1.csv', False)
if __name__ == "__main__":
main()
| final_string += word
final_string += ' '
flag = False | conditional_block |
preprocess.py | import numpy as np
import pandas as pd
import os.path
import sys, traceback
import random
import re
import string
import pickle
import string
from nltk.probability import FreqDist
MAX_TOKENS = 512
MAX_WORDS = 400
def truncate(text):
"""Truncate the text."""
# TODO fix this to use a variable instead of 511
text = (text[:511]) if len(text) > MAX_TOKENS else text
return text
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
def contractions(text):
contractions = {
"ain't": "are not ",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"i'd": "i had",
"i'd've": "i would have",
"i'll": "i will",
"i'll've": "i will have",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as ",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there would",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you will",
"you'll've": "you will have",
"you're": "you are",
"you've": "you have"}
words = text.split()
final_string = ""
try:
for word in words:
word = word.lower()
if hasNumbers(word) == False:
if word in contractions:
# print('Word: ' + word)
# print('Replacement: ' + contractions[word])
final_string += contractions[word]
final_string += ' '
flag = True
else:
final_string += word
final_string += ' '
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
print("type error: " + str(e))
exit()
return final_string
def removePunctuationFromList(all_words):
all_words = [''.join(c for c in s if c not in string.punctuation)
for s in all_words]
# Remove the empty strings:
all_words = [s for s in all_words if s]
return all_words
def cleanText(text):
"""Clean up the text."""
try:
text = str(text)
# expand contractions and lower-case the text
text = contractions(text)
# remove html entities
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
new_text = cleanr.sub('', text.strip())
return re.sub(r'\s+', ' ', re.sub(r'\W+', " ", new_text))
# TAG_RE = re.compile(r'<[^>]+>')
except:
print("An exception occurred with: " + text)
return str(text)
def getAllWords(lines, stop_words):
all_words = {}
try:
for line in lines:
words = line.split()
for word in words:
if word not in stop_words:
all_words[word] = True
temp = all_words.keys()
# removePunctuationFromList(temp)
top_words = FreqDist(temp)
print("All Words list length : ", len(top_words))
# print(str(list(all_words1.keys())[:100]))
# use top 20000 words
return list(top_words.keys())[:20000]
# word_features = list(all_words.keys())[:6000]
# featuresets = [(find_features(rev, word_features), category)
# for (rev, category) in documents]
# print("Feature sets list length : ", len(featuresets))
except Exception as e:
print("type error: " + str(e))
exit()
def removeWordsNotIn(text, stop_words):
|
def shortenText(text, all_words):
# print('shortenText')
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if len(word) > 7:
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
# if we hit max number of token, stop parsing string
return final_string[:-1]
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def addWordsIn(text, all_words):
""" Also does truncation """
count = 0
final_string = ""
try:
words = text.split()
for word in words:
word = word.lower()
if word in all_words:
count += 1
if(count == MAX_WORDS-1):
return shortenText(text, all_words)
else:
final_string += word
final_string += ' '
final_string = final_string[:-1]
except Exception as e:
print("Error")
# exit()
print("type error: " + str(e))
return final_string
def read_data(filepath):
"""Read the CSV from disk."""
df = pd.read_csv(filepath, delimiter=',')
stop_words = ["will", "done", "goes","let", "know", "just", "put" "also",
"got", "can", "get" "said", "mr", "mrs", "one", "two", "three",
"four", "five", "i", "me", "my", "myself", "we", "our",
"ours","ourselves","you","youre","your","yours","yourself","yourselves","he","him","his","himself","she","her","hers","herself","it","its","itself","they","them","their","theirs","themselves","what","which","who","whom","this","that","these","those","am","is","are","was","were","be","been","being","have","has","had","having","do","does","did","doing","a","an","the","and","but","if","or","because","as","until","while","of","at","by","for","with","about","against","between","into","through","during","before","after","above","below","to","from","up","down","in","out","on","off","over","under","again","further","then","once","here","there","when","where","why","how",
"all","any","both","each","few","more","most","other","some","such",
"can", "will",
"just",
"don",
"don't",
"should",
"should've",
"now",
"d",
"ll",
"m",
"o",
"re",
"ve",
"y",
"ain",
"aren",
"aren't",
"couldn",
"couldn't",
"didn",
"didn't",
"doesn",
"doesn't",
"hadn",
"hadn't",
"hasn",
"hasn't",
"haven",
"haven't",
"isn",
"isn't",
"ma",
"mightn",
"mightn't",
"mustn",
"mustn't",
"needn",
"needn't",
"shan",
"shan't",
"shouldn"
"shouldn't",
"wasn",
"wasn't",
"weren",
"weren't",
"won",
"won't",
"wouldn",
"wouldn't"]
# pandas drop columns using list of column names
df = df.drop(['doc_id', 'date', 'title'], axis=1)
print('Cleaning text')
df["clean_text"] = df['text'].apply(cleanText)
print('Removing words in stop words')
df['clean_text'] = [removeWordsNotIn(line, stop_words) for line in df['clean_text']]
clean_text = df["clean_text"].tolist()
# print(clean_text[:10])
print('Getting all words')
all_words = getAllWords(clean_text, stop_words)
# print('adding words in all_words')
df['clean_text'] = [addWordsIn(line, all_words) for line in df['clean_text']]
# df.text = df.text.apply(lambda x: x.translate(None, string.punctuation))
# df.clean_text = df.clean_text.apply(lambda x: x.translate(string.digits))
# df["clean_text"] = df['text'].str.replace('[^\w\s]','')
print('Finished reading and cleaning data')
print('Number of rows in dataframe: ' + str(len(df.index)))
return df
def preprocess_file(filepath, output_path, flag):
df = read_data(filepath)
if(flag):
header = ["ID", "clean_text", "star_rating", 'human_tag']
else:
header = ["ID", "clean_text"]
print('Output: ' + output_path)
df.to_csv(output_path, columns = header, index=False)
def main():
"""Main function of the program."""
# Specify path
training_filepath = 'data/training.csv'
testing_filepath = 'data/public_test_features.csv'
# Check whether the specified path exists or not
isExist = os.path.exists(training_filepath)
if(isExist):
print('Reading from ' + training_filepath)
else:
print('Training file not found in the app path.')
exit()
preprocess_file(training_filepath, 'data/clean_training1.csv', True)
# Check whether the specified path exists or not
isExist = os.path.exists(testing_filepath)
if(isExist):
print('Reading from ' + testing_filepath)
else:
print('Testing file not found in the app path.')
exit()
preprocess_file(testing_filepath,'data/clean_testing1.csv', False)
if __name__ == "__main__":
main()
| words = text.split()
final_string = ""
flag = False
try:
for word in words:
word = word.lower()
if word not in stop_words:
final_string += word
final_string += ' '
flag = True
else:
flag = False
if(flag):
final_string = final_string[:-1]
except Exception as e:
# print("type error: " + str(e))
print("type error")
exit()
return final_string | identifier_body |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! basically act as a mapping of rustc's trait resolution into executable code. This
//! allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not extra-efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
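// Illustrative sketch (added for clarity, not part of the original crate): the
// kind of "descriptor function" this interpreter is meant to run. The function
// name and the exact import signature below are assumptions for illustration.
//
//     extern "C" { fn __wbindgen_describe(v: u32); }
//
//     unsafe fn describe_hypothetical() {
//         __wbindgen_describe(4); // e.g. some type tag
//         __wbindgen_describe(1); // e.g. a parameter count
//     }
//
// Interpreting such a function with this module yields the list [4, 1], which
// the cli-support crate then decodes into an actual `Descriptor`.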
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn | (&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
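// Usage sketch (assumed, not taken from the original call sites): given an
// already-parsed `walrus::Module` and the id of a descriptor function, the two
// calls below are all that is needed to obtain the raw descriptor words.
//
//     let mut interp = Interpreter::new(&module)?;
//     if let Some(words) = interp.interpret_descriptor(func_id, &module) {
//         println!("descriptor = {:?}", words);
//     }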
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
}
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
}
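// Sketch of the instruction mix `Frame::eval` is expected to handle (an
// assumption based on the comments above, not a dump from a real module): a
// typical descriptor prologue only shuffles the shadow stack pointer before
// reporting values, e.g.
//
//     global.get 0      ;; read the stack pointer (global 0)
//     i32.const 16
//     i32.sub           ;; reserve 16 bytes
//     global.set 0      ;; write it back
//
// which maps onto the GlobalGet / Const / Binop / GlobalSet arms above.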
| interpret_descriptor | identifier_name |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! basically act as a mapping of rustc's trait resolution into executable code. This
//! allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not extra-efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
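// Usage sketch (assumed): for a function that is known to internally call
// `__wbindgen_describe_closure`, the closure's descriptor is recovered and the
// now-dead function table entry is remembered for later removal.
//
//     let mut unused = HashSet::new();
//     if let Some(words) = interp.interpret_closure_descriptor(func_id, &module, &mut unused) {
//         // `words` describes the closure's signature; `unused` now holds
//         // (element id, index) pairs that the caller can snip out.
//     }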
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> |
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
}
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
}
| {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
} | identifier_body |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! basically act as a mapping of rustc's trait resolution into executable code. This
//! allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not extra-efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>, | impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
}
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
} | }
| random_line_split |
lib.rs | //! A tiny and incomplete wasm interpreter
//!
//! This module contains a tiny and incomplete wasm interpreter built on top of
//! `walrus`'s module structure. Each `Interpreter` contains some state
//! about the execution of a wasm instance. The "incomplete" part here is
//! related to the fact that this is *only* used to execute the various
//! descriptor functions for wasm-bindgen.
//!
//! As a recap, the wasm-bindgen macro generates "descriptor functions" which
//! basically act as a mapping of rustc's trait resolution into executable code. This
//! allows us to detect, after the macro is invoked, what trait selection did and
//! what types of functions look like. By executing descriptor functions they'll
//! each invoke a known import (with only one argument) some number of times,
//! which gives us a list of `u32` values to then decode.
//!
//! The interpreter here is only geared towards this one exact use case, so it's
//! quite small and likely not extra-efficient.
#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet};
use walrus::ir::Instr;
use walrus::{ElementId, FunctionId, LocalId, Module, TableId};
/// A ready-to-go interpreter of a wasm module.
///
/// An interpreter currently represents effectively cached state. It is reused
/// between calls to `interpret` and is precomputed from a `Module`. It houses
/// state like the wasm stack, wasm memory, etc.
#[derive(Default)]
pub struct Interpreter {
// Function index of the `__wbindgen_describe` and
// `__wbindgen_describe_closure` imported functions. We special case this
// to know when the environment's imported function is called.
describe_id: Option<FunctionId>,
describe_closure_id: Option<FunctionId>,
// Id of the function table
functions: Option<TableId>,
// A mapping of string names to the function index, filled with all exported
// functions.
name_map: HashMap<String, FunctionId>,
// The current stack pointer (global 0) and wasm memory (the stack). Only
// used in a limited capacity.
sp: i32,
mem: Vec<i32>,
scratch: Vec<i32>,
// The descriptor which we're assembling, a list of `u32` entries. This is
// very specific to wasm-bindgen and is the purpose for the existence of
// this module.
descriptor: Vec<u32>,
// When invoking the `__wbindgen_describe_closure` imported function, this
// stores the last table index argument, used for finding a different
// descriptor.
descriptor_table_idx: Option<u32>,
}
impl Interpreter {
/// Creates a new interpreter from a provided `Module`, precomputing all
/// information necessary to interpret further.
///
/// Note that the `module` passed in to this function must be the same as
/// the `module` passed to `interpret` below.
pub fn new(module: &Module) -> Result<Interpreter, anyhow::Error> {
let mut ret = Interpreter::default();
// The descriptor functions shouldn't really use all that much memory
// (the LLVM call stack, now the wasm stack). To handle that let's give
// ourselves a little bit of memory and set the stack pointer (global
// 0) to the top.
ret.mem = vec![0; 0x400];
ret.sp = ret.mem.len() as i32;
// Figure out where the `__wbindgen_describe` imported function is, if
// it exists. We'll special case calls to this function as our
// interpretation should only invoke this function as an imported
// function.
for import in module.imports.iter() {
let id = match import.kind {
walrus::ImportKind::Function(id) => id,
_ => continue,
};
if import.module != "__wbindgen_placeholder__" {
continue;
}
if import.name == "__wbindgen_describe" {
ret.describe_id = Some(id);
} else if import.name == "__wbindgen_describe_closure" {
ret.describe_closure_id = Some(id);
}
}
// Build up the mapping of exported functions to function ids.
for export in module.exports.iter() {
let id = match export.item {
walrus::ExportItem::Function(id) => id,
_ => continue,
};
ret.name_map.insert(export.name.to_string(), id);
}
ret.functions = module.tables.main_function_table()?;
Ok(ret)
}
/// Interprets the execution of the descriptor function `func`.
///
/// This function will execute `func` in the `module` provided. Note that
/// the `module` provided here must be the same as the one passed to `new`
/// when this `Interpreter` was constructed.
///
/// The `func` must be a wasm-bindgen descriptor function meaning that it
/// doesn't do anything like use floats or i64. Instead all it should do is
/// call other functions, sometimes some stack pointer manipulation, and
/// then call the one imported `__wbindgen_describe` function. Anything else
/// will cause this interpreter to panic.
///
/// When the descriptor has finished running the assembled descriptor list
/// is returned. The descriptor returned can then be re-parsed into an
/// actual `Descriptor` in the cli-support crate.
///
/// # Return value
///
/// Returns `Some` if `func` was found in the `module` and `None` if it was
/// not found in the `module`.
pub fn interpret_descriptor(&mut self, id: FunctionId, module: &Module) -> Option<&[u32]> {
self.descriptor.truncate(0);
// We should have a blank wasm and LLVM stack at both the start and end
// of the call.
assert_eq!(self.sp, self.mem.len() as i32);
self.call(id, module, &[]);
assert_eq!(self.sp, self.mem.len() as i32);
Some(&self.descriptor)
}
/// Interprets a "closure descriptor", figuring out the signature of the
/// closure that was intended.
///
/// This function will take an `id` which is known to internally
/// execute `__wbindgen_describe_closure` and interpret it. The
/// `wasm-bindgen` crate controls all callers of this internal import. It
/// will then take the index passed to `__wbindgen_describe_closure` and
/// interpret it as a function pointer. This means it'll look up within the
/// element section (function table) which index it points to. Upon finding
/// the relevant entry it'll assume that function is a descriptor function,
/// and then it will execute the descriptor function.
///
/// The returned value is the return value of the descriptor function found.
/// The `entry_removal_list` list is also then populated with an index of
/// the entry in the elements section (and then the index within that
/// section) of the function that needs to be snip'd out.
pub fn interpret_closure_descriptor(
&mut self,
id: FunctionId,
module: &Module,
entry_removal_list: &mut HashSet<(ElementId, usize)>,
) -> Option<&[u32]> {
// Call the `id` function. This is an internal `#[inline(never)]`
// whose code is completely controlled by the `wasm-bindgen` crate, so
// it should take some arguments (the number of arguments depends on the
// optimization level) and return one (all of which we don't care about
// here). What we're interested in is that while executing this function
// it'll call `__wbindgen_describe_closure` with an argument that we
// look for.
assert!(self.descriptor_table_idx.is_none());
let func = module.funcs.get(id);
let params = module.types.get(func.ty()).params();
assert!(
params.iter().all(|p| *p == walrus::ValType::I32),
"closure descriptors should only have i32 params"
);
let num_params = params.len();
assert!(
num_params <= 2,
"closure descriptors have 2 parameters, but might lose some parameters due to LTO"
);
let args = vec![0; num_params];
self.call(id, module, &args);
let descriptor_table_idx = self
.descriptor_table_idx
.take()
.expect("descriptor function should return index");
// After we've got the table index of the descriptor function we're
// interested go take a look in the function table to find what the
// actual index of the function is.
let entry =
wasm_bindgen_wasm_conventions::get_function_table_entry(module, descriptor_table_idx)
.expect("failed to find entry in function table");
let descriptor_id = entry.func.expect("element segment slot wasn't set");
entry_removal_list.insert((entry.element, entry.idx));
// And now execute the descriptor!
self.interpret_descriptor(descriptor_id, module)
}
/// Returns the function id of the `__wbindgen_describe_closure`
/// imported function.
pub fn describe_closure_id(&self) -> Option<FunctionId> {
self.describe_closure_id
}
/// Returns the detected id of the function table.
pub fn function_table_id(&self) -> Option<TableId> {
self.functions
}
fn call(&mut self, id: FunctionId, module: &Module, args: &[i32]) -> Option<i32> {
let func = module.funcs.get(id);
log::debug!("starting a call of {:?} {:?}", id, func.name);
log::debug!("arguments {:?}", args);
let local = match &func.kind {
walrus::FunctionKind::Local(l) => l,
_ => panic!("can only call locally defined functions"),
};
let entry = local.entry_block();
let block = local.block(entry);
let mut frame = Frame {
module,
interp: self,
locals: BTreeMap::new(),
done: false,
};
assert_eq!(local.args.len(), args.len());
for (arg, val) in local.args.iter().zip(args) {
frame.locals.insert(*arg, *val);
}
for (instr, _) in block.instrs.iter() {
frame.eval(instr);
if frame.done {
break;
}
}
self.scratch.last().cloned()
}
}
struct Frame<'a> {
module: &'a Module,
interp: &'a mut Interpreter,
locals: BTreeMap<LocalId, i32>,
done: bool,
}
impl Frame<'_> {
fn eval(&mut self, instr: &Instr) {
use walrus::ir::*;
let stack = &mut self.interp.scratch;
match instr {
Instr::Const(c) => match c.value {
Value::I32(n) => stack.push(n),
_ => panic!("non-i32 constant"),
},
Instr::LocalGet(e) => stack.push(self.locals.get(&e.local).cloned().unwrap_or(0)),
Instr::LocalSet(e) => {
let val = stack.pop().unwrap();
self.locals.insert(e.local, val);
}
Instr::LocalTee(e) => {
let val = *stack.last().unwrap();
self.locals.insert(e.local, val);
}
// Blindly assume all globals are the stack pointer
Instr::GlobalGet(_) => stack.push(self.interp.sp),
Instr::GlobalSet(_) => {
let val = stack.pop().unwrap();
self.interp.sp = val;
}
// Support simple arithmetic, mainly for the stack pointer
// manipulation
Instr::Binop(e) => {
let rhs = stack.pop().unwrap();
let lhs = stack.pop().unwrap();
stack.push(match e.op {
BinaryOp::I32Sub => lhs - rhs,
BinaryOp::I32Add => lhs + rhs,
op => panic!("invalid binary op {:?}", op),
});
}
// Support small loads/stores to the stack. These show up in debug
// mode where there's some traffic on the linear stack even when in
// theory there doesn't need to be.
Instr::Load(e) => {
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
stack.push(self.interp.mem[address as usize / 4])
}
Instr::Store(e) => {
let value = stack.pop().unwrap();
let address = stack.pop().unwrap();
let address = address as u32 + e.arg.offset;
assert!(address % 4 == 0);
self.interp.mem[address as usize / 4] = value;
}
Instr::Return(_) => {
log::debug!("return");
self.done = true;
}
Instr::Drop(_) => {
log::debug!("drop");
stack.pop().unwrap();
}
Instr::Call(e) => {
// If this function is calling the `__wbindgen_describe`
// function, which we've precomputed the id for, then
// it's telling us about the next `u32` element in the
// descriptor to return. We "call" the imported function
// here by directly inlining it.
if Some(e.func) == self.interp.describe_id {
let val = stack.pop().unwrap();
log::debug!("__wbindgen_describe({})", val);
self.interp.descriptor.push(val as u32);
// If this function is calling the `__wbindgen_describe_closure`
// function then it's similar to the above, except there's a
// slightly different signature. Note that we don't eval the
// previous arguments because they shouldn't have any side
// effects we're interested in.
} else if Some(e.func) == self.interp.describe_closure_id {
let val = stack.pop().unwrap();
stack.pop();
stack.pop();
log::debug!("__wbindgen_describe_closure({})", val);
self.interp.descriptor_table_idx = Some(val as u32);
stack.push(0)
// ... otherwise this is a normal call so we recurse.
} else |
}
// All other instructions shouldn't be used by our various
// descriptor functions. LLVM optimizations may mean that some
// of the above instructions aren't actually needed either, but
// the above instructions have empirically been required when
// executing our own test suite in wasm-bindgen.
//
// Note that LLVM may change over time to generate new
// instructions in debug mode, and we'll have to react to those
// sorts of changes as they arise.
s => panic!("unknown instruction {:?}", s),
}
}
}
| {
let ty = self.module.types.get(self.module.funcs.get(e.func).ty());
let args = (0..ty.params().len())
.map(|_| stack.pop().unwrap())
.collect::<Vec<_>>();
self.interp.call(e.func, self.module, &args);
} | conditional_block |
maze.rs | //! I would like to approach the problem in two distinct ways
//!
//! One of them is floodfill - the solution is highly suboptimal in terms of computational complexity,
//! but it parallelizes perfectly - every iteration step recalculates new maze path data basing
//! entirely on the previous iteration. The approach has a problem: every iteration step is O(n)
//! itself, where n is the entire maze size. However - the solution scales perfectly if we can have
//! a separate thread for every field, which happens if we are on some kind of strong SIMD
//! architecture - like a GPU. I see that in the offer there was an "FPGA" thing, and as we are
//! talking about financial calculations, I assume this is the reason for "FPGA" being there.
//!
//! The other approach is trying to have just a nice solution for normal processors - just implement
//! properly aligned A* as a pretty easy and common solution for pathfinding. Nothing special there,
//! but on a SISD arch it should behave pretty nicely (it could probably be improved by using some
//! more sophisticated algo like double-ended A*, but I am lazy - too much work not showing too
//! much; if I really found more time I would rather try to do something more interesting -
//! visualization, or some kind of optimization - but I don't believe I would find motivation for that).
//!
//! I figured out an additional "approach" (apart from taking a completely different search algo). The maze
//! could be easily preprocessed into a directed graph, where each cell (so actually every non-wall maze field)
//! has a connection to the closest path crossing, and then any pathfinding alg could be run on that.
//! The benefit is that pathfinding itself is performed on a strongly reduced graph; the downside is
//! obviously the need for preprocessing (not that much - it is possible to do it in O(x * y), but every
//! field has to be visited, while most reasonable pathfinding algorithms avoid visiting every
//! field). The problem that if the exit is not on a crossing then there is no incoming path to it
//! is actually not difficult to solve - a simple raycast from the exit can be done to find all fields
//! "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
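// NOTE: illustrative sketch only - this is not the crate's real `flood` implementation
// (re-exported above), and the simplified grid types (`prev`, `walls`, unit step cost) are my own
// assumptions. It only shows the relaxation idea from the doc comment: each cell derives its new
// distance from the previous iteration alone, so all cells could be updated in parallel
// (SIMD/GPU style).
#[allow(dead_code)]
fn flood_step_sketch(prev: &[Option<usize>], walls: &[bool], w: usize) -> (Vec<Option<usize>>, bool) {
    // Assumes prev.len() == walls.len() and that the length is a multiple of w.
    let mut changed = false;
    let next: Vec<Option<usize>> = prev
        .iter()
        .enumerate()
        .map(|(i, &cur)| {
            if walls[i] {
                return None;
            }
            let mut best = cur;
            // Neighbour indices, guarded so we never leave the grid.
            let neighbours = [
                i.checked_sub(w),                                    // up
                if i + w < prev.len() { Some(i + w) } else { None }, // down
                if i % w > 0 { Some(i - 1) } else { None },          // left
                if i % w + 1 < w { Some(i + 1) } else { None },      // right
            ];
            for n in neighbours.iter().copied().flatten() {
                if let Some(d) = prev[n] {
                    let cand = d + 1; // unit cost; the real problem also charges for turns
                    if best.map_or(true, |b| cand < b) {
                        best = Some(cand);
                        changed = true;
                    }
                }
            }
            best
        })
        .collect();
    (next, changed)
}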
/// Direction from which it is needed to approach the field to achieve it with the given cost. As it is
/// possible to have the same distance from multiple directions, it is a simple bitset. This is needed,
/// as in our problem the cost of the next step depends on whether there is a turn on this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
}
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
/// given direction at least once | // I have a feeling it is strongly suboptimal; actually, as both directions are encoded as 4
// bits, just a precomputed table would be the best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
if !self.has_all(d) {
continue;
}
let mut o = other.0 & !dir.0;
let mut cnt = 0;
while o != 0 {
cnt += 1;
d = d.left();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
d = *dir;
o = other.0 & !dir.0;
cnt = 0;
while o != 0 {
cnt += 1;
d = d.right();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum Field {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
/// It doesn't need to be the closest path - it is a distance calculated along some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
/// Width of maze as it is needed for proper addressing (including external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
// On overflow just give an invalid (too big) index - anything from here would be a wall by
// default, which is a simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
// Doing wrapping sub basically because maze size is way smaller than my indexing type size
// (considering >= 16bit machine), so after wrapping I would have invalid field, so Wall by
// default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
/// Returns field in given direction from given one (Wall if no such field)
/// If Dir has more than one direction encoded, field with same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
/// Creates a valid maze from input containing the maze description, and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
// Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1, which
// happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF representation
// of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n'))
.collect();
f.write_str(&line)?;
}
Ok(())
}
}
/// As both "parts" of excercise are actually two separated applications, here we have maze "main"
/// (with preparsed arguments).
///
/// The last argument is function for caluclating the shortest path.
/// As an argument it takes initial maze, with at least one field with known distance - which is
/// considered to be an "initial cost" of entering into the maze with this input, and additionally
/// a field where we algorithm is looking path to. Returned maze contains exit field calculated to
/// the closest path, and some another field calculated to have "at least this good" path.
///
/// If there is no path to given exit, calculator should return maze with not calculated exit field
pub fn main(
x: usize,
y: usize,
input: impl BufRead,
calculator: impl Fn(Maze, usize, usize) -> Maze,
) {
let mut maze = Maze::from_input(x, y, input);
*maze.field_mut(0, 1).unwrap() = Field::Calculated(Dir::ANY, 0);
#[cfg(feature = "text_visualize")]
println!("Initial maze:\n\n{}\n", maze);
let maze = calculator(maze, x - 1, y - 2);
#[cfg(feature = "text_visualize")]
println!("Calculated maze:\n\n{}\n", maze);
match maze.field(x - 1, y - 2) {
Field::Empty => println!("UNREACHABLE"),
Field::Wall => println!("INVALID"),
Field::Calculated(_, cost) => println!("{}", cost),
}
} | pub fn min_rotation(self, other: Self) -> usize { | random_line_split |
maze.rs | //! I would like to approach the problem in two distinct ways
//!
//! One of them is floodfill - the solution is highly suboptimal in terms of computational
//! complexity, but it parallelizes perfectly - every iteration step recalculates the new maze path
//! data based entirely on the previous iteration. The approach has a problem: every iteration step
//! is O(n) itself, where n is the entire maze size. However - the solution scales perfectly if we
//! can have a separate thread for every field, which happens if we are on some kind of strong SIMD
//! architecture - like a GPU. I see that the offer mentioned an "FPGA" thing, and as we are
//! talking about financial calculations, I assume this is the reason "FPGA" is there.
//!
//! The other approach is trying to have just a nice solution for normal processors - just implement
//! a properly aligned A*, as it is a pretty easy and common solution for pathfinding. Nothing special
//! there, but on a SISD arch it should behave pretty nicely (it could probably be improved by using
//! some more sophisticated algo like double-ended A*, but I am lazy - too much work not showing too
//! much; if I really found more time I would rather try to do something more interesting -
//! visualization, or some kind of optimization - but I don't believe I would find motivation for that).
//!
//! I figured out an additional "approach" (besides taking a completely different search algo). The maze
//! could be easily preprocessed into a directed graph, where each cell (so actually every non-wall maze
//! field) has a connection to the closest path crossing, and then any pathfinding alg can be run on that.
//! The benefit is that pathfinding itself is performed on a strongly reduced graph; the downside is
//! obviously the need for preprocessing (not that much - possible to be done in O(x * y), but every
//! field has to be visited, while most reasonable pathfinding algorithms avoid visiting every
//! field). The problem that if the exit is not on a crossing then there is no incoming path to it
//! is actually not difficult to solve - a simple raycast from the exit can be done to find all fields
//! "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
/// Direction from which it is needed to approach the field to achieve it with the given cost. As it is
/// possible to have the same distance from multiple directions, it is a simple bitset. This is needed,
/// as in our problem the cost of the next step depends on whether there is a turn on this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
}
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
/// given direction at least once
pub fn min_rotation(self, other: Self) -> usize {
// I have a feeling it is strongly suboptimal; actually, as both directions are encoded as 4
// bits, just a precomputed table would be the best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
if !self.has_all(d) {
continue;
}
let mut o = other.0 & !dir.0;
let mut cnt = 0;
while o != 0 {
cnt += 1;
d = d.left();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
d = *dir;
o = other.0 & !dir.0;
cnt = 0;
while o != 0 {
cnt += 1;
d = d.right();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
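// Quick sanity-check of the quarter-turn bit encoding above (my own reading of the code, added
// for illustration; not part of the original crate and safe to drop).
#[cfg(test)]
mod dir_rotation_sketch {
    use super::Dir;
    #[test]
    fn quarter_turns() {
        assert_eq!(Dir::UP.left(), Dir::LEFT);
        assert_eq!(Dir::UP.right(), Dir::RIGHT);
        assert_eq!(Dir::LEFT.left(), Dir::DOWN);
        assert_eq!(Dir::DOWN.right(), Dir::LEFT);
    }
}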
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum | {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
/// It doesn't need to be the closest path - it is a distance calculated along some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
/// Width of maze as it is needed for proper addressing (including external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
// On overflow just give an invalid (too big) index - anything from here would be a wall by
// default, which is a simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
// Doing wrapping sub basically because maze size is way smaller than my indexing type size
// (considering >= 16bit machine), so after wrapping I would have invalid field, so Wall by
// default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
/// Returns field in given direction from given one (Wall if no such field)
/// If Dir has more than one direction encoded, field with same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
/// Creates a valid maze from input containing the maze description, and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
// Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1, which
// happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF representation
// of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n'))
.collect();
f.write_str(&line)?;
}
Ok(())
}
}
/// As both "parts" of excercise are actually two separated applications, here we have maze "main"
/// (with preparsed arguments).
///
/// The last argument is function for caluclating the shortest path.
/// As an argument it takes initial maze, with at least one field with known distance - which is
/// considered to be an "initial cost" of entering into the maze with this input, and additionally
/// a field where we algorithm is looking path to. Returned maze contains exit field calculated to
/// the closest path, and some another field calculated to have "at least this good" path.
///
/// If there is no path to given exit, calculator should return maze with not calculated exit field
pub fn main(
x: usize,
y: usize,
input: impl BufRead,
calculator: impl Fn(Maze, usize, usize) -> Maze,
) {
let mut maze = Maze::from_input(x, y, input);
*maze.field_mut(0, 1).unwrap() = Field::Calculated(Dir::ANY, 0);
#[cfg(feature = "text_visualize")]
println!("Initial maze:\n\n{}\n", maze);
let maze = calculator(maze, x - 1, y - 2);
#[cfg(feature = "text_visualize")]
println!("Calculated maze:\n\n{}\n", maze);
match maze.field(x - 1, y - 2) {
Field::Empty => println!("UNREACHABLE"),
Field::Wall => println!("INVALID"),
Field::Calculated(_, cost) => println!("{}", cost),
}
}
| Field | identifier_name |
maze.rs | //! I would like to approach the problem in two distinct ways
//!
//! One of them is floodfill - the solution is highly suboptimal in terms of computational
//! complexity, but it parallelizes perfectly - every iteration step recalculates the new maze path
//! data based entirely on the previous iteration. The approach has a problem: every iteration step
//! is O(n) itself, where n is the entire maze size. However - the solution scales perfectly if we
//! can have a separate thread for every field, which happens if we are on some kind of strong SIMD
//! architecture - like a GPU. I see that the offer mentioned an "FPGA" thing, and as we are
//! talking about financial calculations, I assume this is the reason "FPGA" is there.
//!
//! The other approach is trying to have just a nice solution for normal processors - just implement
//! a properly aligned A*, as it is a pretty easy and common solution for pathfinding. Nothing special
//! there, but on a SISD arch it should behave pretty nicely (it could probably be improved by using
//! some more sophisticated algo like double-ended A*, but I am lazy - too much work not showing too
//! much; if I really found more time I would rather try to do something more interesting -
//! visualization, or some kind of optimization - but I don't believe I would find motivation for that).
//!
//! I figured out an additional "approach" (besides taking a completely different search algo). The maze
//! could be easily preprocessed into a directed graph, where each cell (so actually every non-wall maze
//! field) has a connection to the closest path crossing, and then any pathfinding alg can be run on that.
//! The benefit is that pathfinding itself is performed on a strongly reduced graph; the downside is
//! obviously the need for preprocessing (not that much - possible to be done in O(x * y), but every
//! field has to be visited, while most reasonable pathfinding algorithms avoid visiting every
//! field). The problem that if the exit is not on a crossing then there is no incoming path to it
//! is actually not difficult to solve - a simple raycast from the exit can be done to find all fields
//! "connected" to the exit (O(x + y)).
//!
//! In terms of visualization (even printing to text) - I don't even try to be efficient.
use std::cmp::Ordering;
use std::io::BufRead;
mod flood;
pub use flood::flood;
mod astar;
pub use astar::astar;
/// Direction from which it is needed to approach the field to achieve it with the given cost. As it is
/// possible to have the same distance from multiple directions, it is a simple bitset. This is needed,
/// as in our problem the cost of the next step depends on whether there is a turn on this step.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Dir(u8);
impl Dir {
pub const NONE: Dir = Dir(0);
pub const LEFT: Dir = Dir(1);
pub const UP: Dir = Dir(2);
pub const RIGHT: Dir = Dir(4);
pub const DOWN: Dir = Dir(8);
pub const ANY: Dir = Dir(1 | 2 | 4 | 8);
pub fn has_all(&self, Dir(other): Dir) -> bool {
self.0 & other == other
}
/// Returns directions in which at least one step is needed
pub fn vec((from_x, from_y): (usize, usize), (to_x, to_y): (usize, usize)) -> Self |
/// Rotates left
pub fn left(mut self) -> Self {
let down = (self.0 & 1) << 3;
self.0 >>= 1;
self.0 |= down;
self
}
/// Rotates right
pub fn right(mut self) -> Self {
let left = (self.0 & 8) >> 3;
self.0 <<= 1;
self.0 |= left;
self.0 &= 0xf;
self
}
/// Returns minimal number of rotations so at least one encoded direction would match every
/// given direction at least once
pub fn min_rotation(self, other: Self) -> usize {
// I have a feeling it is strongly suboptimal; actually, as both directions are encoded as 4
// bits, just a precomputed table would be the best solution
let mut min = 4;
for dir in [Self::LEFT, Self::RIGHT, Self::UP, Self::DOWN].iter() {
let mut d = *dir;
if !self.has_all(d) {
continue;
}
let mut o = other.0 & !dir.0;
let mut cnt = 0;
while o != 0 {
cnt += 1;
d = d.left();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
d = *dir;
o = other.0 & !dir.0;
cnt = 0;
while o != 0 {
cnt += 1;
d = d.right();
o &= !d.0;
}
min = std::cmp::min(min, cnt);
}
min
}
}
impl std::ops::BitOr for Dir {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
/// Single field in maze
#[derive(Clone, Copy, Debug)]
enum Field {
Empty,
Wall,
/// Empty field with known distance from the start of the maze
/// It doesn't need to be the closest path - it is a distance calculated along some path
Calculated(Dir, usize),
}
/// Whole maze representation
pub struct Maze {
/// All fields flattened
maze: Box<[Field]>,
/// Width of maze as it is needed for proper addressing (including external wall)
w: usize,
}
impl Maze {
/// Maps coord to field index
fn idx(&self, x: usize, y: usize) -> usize {
// On overflow just give an invalid (too big) index - anything from here would be a wall by
// default, which is a simplification on purpose
y.saturating_mul(self.w).saturating_add(x)
}
/// Maps field index to coordinates
fn coords(&self, idx: usize) -> (usize, usize) {
(idx % self.w, idx / self.w)
}
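// Worked example (illustrative numbers only): with w = 5, idx(2, 3) = 3 * 5 + 2 = 17 and
// coords(17) = (17 % 5, 17 / 5) = (2, 3), so the two mappings invert each other as long as x < w.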
/// Returns index of field in given direction (defined to be wrapping)
fn in_dir_idx(&self, idx: usize, dir: Dir) -> usize {
let (x, y) = self.coords(idx);
// Doing wrapping sub basically because maze size is way smaller than my indexing type size
// (considering >= 16bit machine), so after wrapping I would have invalid field, so Wall by
// default
let (x, y) = match dir {
Dir::UP => (x, y.wrapping_sub(1)),
Dir::DOWN => (x, y + 1),
Dir::LEFT => (x.wrapping_sub(1), y),
Dir::RIGHT => (x + 1, y),
_ => (x, y),
};
self.idx(x, y)
}
/// Returns field in given direction from given one (Wall if no such field)
/// If Dir has more than one direction encoded, field with same idx is returned
fn in_dir(&self, idx: usize, dir: Dir) -> Field {
self.maze
.get(self.in_dir_idx(idx, dir))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives field from given coord (Wall if no such field)
fn field(&self, x: usize, y: usize) -> Field {
self.maze
.get(self.idx(x, y))
.copied()
.unwrap_or(Field::Wall)
}
/// Gives mutable field from given coord
fn field_mut(&mut self, x: usize, y: usize) -> Option<&mut Field> {
self.maze.get_mut(self.idx(x, y))
}
/// Creates a valid maze from input containing the maze description, and its x/y dimensions
pub fn from_input(x: usize, y: usize, input: impl BufRead) -> Self {
// Iterating over bytes is a bad idea, but the only interesting characters are 0 and 1, which
// happen to be ASCII bytes. I am aware it won't work with any non-ASCII UTF representation
// of 0 and 1 and "I don't care, what they're going to say..."
let maze = input
.lines()
.take(y)
.flat_map(|line| line.unwrap().into_bytes())
.map(|field| match field {
b'0' => Field::Wall,
b'1' => Field::Empty,
_ => panic!("Invalid input"),
})
.collect();
Maze { maze, w: x }
}
}
#[cfg(feature = "text_visualize")]
impl std::fmt::Display for Maze {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// While printing the maze, the external wall is not printed
for line in self.maze.chunks(self.w) {
let line: String = line
.iter()
.map(|field| match field {
Field::Empty => ' ',
Field::Wall => '#',
Field::Calculated(_, distance) => {
(distance % 10).to_string().chars().last().unwrap()
}
})
.chain(std::iter::once('\n'))
.collect();
f.write_str(&line)?;
}
Ok(())
}
}
/// As both "parts" of excercise are actually two separated applications, here we have maze "main"
/// (with preparsed arguments).
///
/// The last argument is function for caluclating the shortest path.
/// As an argument it takes initial maze, with at least one field with known distance - which is
/// considered to be an "initial cost" of entering into the maze with this input, and additionally
/// a field where we algorithm is looking path to. Returned maze contains exit field calculated to
/// the closest path, and some another field calculated to have "at least this good" path.
///
/// If there is no path to given exit, calculator should return maze with not calculated exit field
pub fn main(
x: usize,
y: usize,
input: impl BufRead,
calculator: impl Fn(Maze, usize, usize) -> Maze,
) {
let mut maze = Maze::from_input(x, y, input);
*maze.field_mut(0, 1).unwrap() = Field::Calculated(Dir::ANY, 0);
#[cfg(feature = "text_visualize")]
println!("Initial maze:\n\n{}\n", maze);
let maze = calculator(maze, x - 1, y - 2);
#[cfg(feature = "text_visualize")]
println!("Calculated maze:\n\n{}\n", maze);
match maze.field(x - 1, y - 2) {
Field::Empty => println!("UNREACHABLE"),
Field::Wall => println!("INVALID"),
Field::Calculated(_, cost) => println!("{}", cost),
}
}
| {
let h = match from_x.cmp(&to_x) {
Ordering::Less => Self::LEFT,
Ordering::Greater => Self::RIGHT,
Ordering::Equal => Self::NONE,
};
let v = match from_y.cmp(&to_y) {
Ordering::Less => Self::UP,
Ordering::Greater => Self::DOWN,
Ordering::Equal => Self::NONE,
};
h | v
} | identifier_body |
darts.js | /*
 * helps counting the score when playing darts
 * this is a game used to count darts when
 * you're playing real darts.
 * this is free to use and serves as a demo for the moment
 * please send mail to [email protected] if you need info
*
* Updated : 2017/02/04
*/
/*
* load game on document load
*/
$(document).ready(function() {
oDG = new DG()
oDG.initSounds();
oDG.buttonFactory();
oDG.init();
});
/*
* the dart game class
*/
function DG() |
/*
* initialize game variables
*/
DG.prototype.init = function(){
DG.buttonEnable = true;
DG.numberPlay = 3;
DG.isEndGame = false;
DG.player1 = true;
DG.multi = 1
DG.lastScore = 0;
DG.keyPressed = 0;
DG.currentPlayer = 'player1'
DG.currentPlayerName = $(".player1Name")[0].innerText;
DG.prototype.initColors()
DG.prototype.initWinnerButtons()
$('.p1Result').val(DG.game);
$('.p2Result').val(DG.game);
$('.playerWinBackground').hide();
$('.playerWin').hide();
}
/*
* initialize game Sounds
*/
DG.prototype.initSounds = function(){
DG.sample = new RapidSoundsSample('medias/dart2.mp3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
* repeat a string
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3){
DG.lastScore = $(DG.playerResult).val() ;
}
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
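/*
 * Illustrative only - not wired into the game. This pure helper restates the checkout rules
 * enforced by minusResult above; the name and signature are mine, not part of the original code.
 * A throw busts if it would leave exactly 1, go below 0, or reach 0 without the dart being a
 * double (x2).
 */
function isValidThrowSketch(currentScore, mark, multi) {
    var remaining = currentScore - mark * multi;
    if (remaining < 0 || remaining === 1) return false; // bust
    if (remaining === 0) return multi === 2; // must check out on a double
    return true;
}
// e.g. isValidThrowSketch(32, 16, 2) === true, isValidThrowSketch(32, 16, 1) === false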
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
 * hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
 * score = 0 but the last throw was not a double (x2)
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 = $(this).data('lastTouch') || t2
, dt = t2 - t1
, fingers = e.originalEvent.touches.length;
$(this).data('lastTouch', t2);
if (!dt || dt > 200 || fingers > 1) return; // not double-tap
e.preventDefault(); // double tap - prevent the zoom
// also synthesize click events we just swallowed up
$(this).trigger('click').trigger('click');
});
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySound = function() {
if ( DG.buttonEnable == true){
DG.sample.shootRound();
}
}
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySoundChangePlayer = function() {
DG.sampleChangePlayer.shootRound();
}
/*
* play sound when won
*/
DG.prototype.playSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.shootRound()
}
/*
* stop sound of winner
*/
DG.prototype.stopSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.stop()
}
/*
* display congratulations
*/
DG.prototype.endGame = function(){
$('.playerWinBackground').show();
DG.prototype.playSoundWin(DG.currentPlayer)
$('.playerWin').show();
//~ $(DG.playerResult).val('win');
DG.prototype.saveData();
DG.isEndGame = true;
if ('player1' == DG.currentPlayer){
DG.scoreP1 ++;
}else {
DG.scoreP2 ++;
}
$('.scorePlayer1')[0].innerText = DG.scoreP1;
$('.scorePlayer2')[0].innerText = DG.scoreP2;
if((DG.scoreP2 > 1)||(DG.scoreP1 > 1)){
$('.yes').hide();
$('.no').hide();
$('.gameStyle').hide();
$('.slash').hide();
$('.playAgain').show();
}
if((DG.scoreP2 + DG.scoreP1 == 2)){
// decider?
$('.gameStyle')[0].innerText = 'Decider ?';
}
if((DG.scoreP2 + DG.scoreP1 == 1)){
// revenge?
$('.gameStyle')[0].innerText = 'Revenge ?';
}
//~ DG.prototype.init();
}
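/*
 * Reading of the leg/match logic above (for illustration): after the first finished leg
 * (scoreP1 + scoreP2 == 1) the win screen offers "Revenge ?", after the second it offers
 * "Decider ?", and as soon as either player has won two legs the yes/no choices are hidden
 * and only "play again" (a full page reload) remains.
 */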
/*
* save data with ajax
*/
DG.prototype.saveData = function(){
url="darts_save.php";
data = 'gameNumber=' + $(".gameNumber")[0].innerText;
data += '&' + DG.currentPlayer + '=' + DG.currentPlayerName;
data += '&keyPressed='+ (DG.multi * DG.keyMark);
data += '&scoreP1=' + $(".p1Result").val();
data += '&scoreP2=' + $(".p2Result").val();
dataType = 'json';
$.ajax({
type: "POST",
url: url,
data: data,
success: function (json) {
}
,
dataType: dataType
});
}
/*
* change the player
*/
DG.prototype.changePlayer = function(){
DG.buttonEnable =true;
DG.prototype.PlaySoundChangePlayer();
if ('player1' == DG.currentPlayer){
DG.currentPlayer = "player2";
DG.currentPlayerName = $(".player2Name")[0].innerText;
$(".p1Result").css({'background-color' : '#333333'});
$(".p2Result").css({'background-color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
}else {
DG.currentPlayer = "player1";
DG.currentPlayerName = $(".player1Name")[0].innerText;
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : '#333333'});
$("button").css({'background-color' : DG.p1Color});
}
DG.prototype.remainingDarts();
}
/*
* repeat a string
*/
DG.prototype.strRepeat = function(str, count){
strOut = ''
for (i=1; i<=count; i++){
strOut = strOut + str;
}
return strOut;
}
/*
* remaining darts
*/
DG.prototype.remainingDarts = function(){
DG.prototype.initDarts();
if ('player1' == DG.currentPlayer){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}else {
$(".numberPlayLeftP2")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}
}
| {
DG.scoreP1 = 0;
DG.scoreP2 = 0;
DG.game = 301
} | identifier_body |
darts.js | /*
 * helps counting the score when playing darts
 * this is a game used to count darts when
 * you're playing real darts.
 * this is free to use and serves as a demo for the moment
 * please send mail to [email protected] if you need info
*
* Updated : 2017/02/04
*/
/*
* load game on document load
*/
$(document).ready(function() {
oDG = new DG()
oDG.initSounds();
oDG.buttonFactory();
oDG.init();
});
/*
* the dart game class
*/
function | (){
DG.scoreP1 = 0;
DG.scoreP2 = 0;
DG.game = 301
}
/*
* initialize game variables
*/
DG.prototype.init = function(){
DG.buttonEnable = true;
DG.numberPlay = 3;
DG.isEndGame = false;
DG.player1 = true;
DG.multi = 1
DG.lastScore = 0;
DG.keyPressed = 0;
DG.currentPlayer = 'player1'
DG.currentPlayerName = $(".player1Name")[0].innerText;
DG.prototype.initColors()
DG.prototype.initWinnerButtons()
$('.p1Result').val(DG.game);
$('.p2Result').val(DG.game);
$('.playerWinBackground').hide();
$('.playerWin').hide();
}
/*
* initialize game Sounds
*/
DG.prototype.initSounds = function(){
DG.sample = new RapidSoundsSample('medias/dart2.mp3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
* repeat a string
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3){
DG.lastScore = $(DG.playerResult).val() ;
}
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
 * hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
 * score = 0 but the last throw was not a double (x2)
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 = $(this).data('lastTouch') || t2
, dt = t2 - t1
, fingers = e.originalEvent.touches.length;
$(this).data('lastTouch', t2);
if (!dt || dt > 200 || fingers > 1) return; // not double-tap
e.preventDefault(); // double tap - prevent the zoom
// also synthesize click events we just swallowed up
$(this).trigger('click').trigger('click');
});
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySound = function() {
if ( DG.buttonEnable == true){
DG.sample.shootRound();
}
}
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySoundChangePlayer = function() {
DG.sampleChangePlayer.shootRound();
}
/*
* play sound when won
*/
DG.prototype.playSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.shootRound()
}
/*
* stop sound of winner
*/
DG.prototype.stopSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.stop()
}
/*
* display congratulations
*/
DG.prototype.endGame = function(){
$('.playerWinBackground').show();
DG.prototype.playSoundWin(DG.currentPlayer)
$('.playerWin').show();
//~ $(DG.playerResult).val('win');
DG.prototype.saveData();
DG.isEndGame = true;
if ('player1' == DG.currentPlayer){
DG.scoreP1 ++;
}else {
DG.scoreP2 ++;
}
$('.scorePlayer1')[0].innerText = DG.scoreP1;
$('.scorePlayer2')[0].innerText = DG.scoreP2;
if((DG.scoreP2 > 1)||(DG.scoreP1 > 1)){
$('.yes').hide();
$('.no').hide();
$('.gameStyle').hide();
$('.slash').hide();
$('.playAgain').show();
}
if((DG.scoreP2 + DG.scoreP1 == 2)){
// decider?
$('.gameStyle')[0].innerText = 'Decider ?';
}
if((DG.scoreP2 + DG.scoreP1 == 1)){
// revenge?
$('.gameStyle')[0].innerText = 'Revenge ?';
}
//~ DG.prototype.init();
}
/*
* save data with ajax
*/
DG.prototype.saveData = function(){
url="darts_save.php";
data = 'gameNumber=' + $(".gameNumber")[0].innerText;
data += '&' + DG.currentPlayer + '=' + DG.currentPlayerName;
data += '&keyPressed='+ (DG.multi * DG.keyMark);
data += '&scoreP1=' + $(".p1Result").val();
data += '&scoreP2=' + $(".p2Result").val();
dataType = 'json';
$.ajax({
type: "POST",
url: url,
data: data,
success: function (json) {
}
,
dataType: dataType
});
}
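// For illustration, the body built above is a plain query string, e.g. (made-up values):
// "gameNumber=7&player1=Alice&keyPressed=60&scoreP1=241&scoreP2=301"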
/*
* change the player
*/
DG.prototype.changePlayer = function(){
DG.buttonEnable =true;
DG.prototype.PlaySoundChangePlayer();
if ('player1' == DG.currentPlayer){
DG.currentPlayer = "player2";
DG.currentPlayerName = $(".player2Name")[0].innerText;
$(".p1Result").css({'background-color' : '#333333'});
$(".p2Result").css({'background-color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
}else {
DG.currentPlayer = "player1";
DG.currentPlayerName = $(".player1Name")[0].innerText;
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : '#333333'});
$("button").css({'background-color' : DG.p1Color});
}
DG.prototype.remainingDarts();
}
/*
* repeat a string
*/
DG.prototype.strRepeat = function(str, count){
strOut = ''
for (i=1; i<=count; i++){
strOut = strOut + str;
}
return strOut;
}
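// Note (suggestion, not in the original code): on any ES2015+ runtime this helper could be
// replaced with the built-in String.prototype.repeat, e.g. '.'.repeat(DG.numberPlay).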
/*
* remaining darts
*/
DG.prototype.remainingDarts = function(){
DG.prototype.initDarts();
if ('player1' == DG.currentPlayer){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}else {
$(".numberPlayLeftP2")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}
}
| DG | identifier_name |
darts.js | /*
 * helps counting the score when playing darts
 * this is a game used to count darts when
 * you're playing real darts.
 * this is free to use and serves as a demo for the moment
 * please send mail to [email protected] if you need info
*
* Updated : 2017/02/04
*/
/*
* load game on document load
*/
$(document).ready(function() {
oDG = new DG()
oDG.initSounds();
oDG.buttonFactory();
oDG.init();
});
/*
* the dart game class
*/
function DG(){
DG.scoreP1 = 0;
DG.scoreP2 = 0;
DG.game = 301
}
/*
* initialize game variables
*/
DG.prototype.init = function(){
DG.buttonEnable = true;
DG.numberPlay = 3;
DG.isEndGame = false;
DG.player1 = true;
DG.multi = 1
DG.lastScore = 0;
DG.keyPressed = 0;
DG.currentPlayer = 'player1'
DG.currentPlayerName = $(".player1Name")[0].innerText;
DG.prototype.initColors()
DG.prototype.initWinnerButtons()
$('.p1Result').val(DG.game);
$('.p2Result').val(DG.game);
$('.playerWinBackground').hide();
$('.playerWin').hide();
}
/*
* initialize game Sounds
*/
DG.prototype.initSounds = function(){
DG.sample = new RapidSoundsSample('medias/dart2.mp3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
* repeat a string
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3){
DG.lastScore = $(DG.playerResult).val() ;
}
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
/*
 * hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
 * score = 0 but the last throw was not a double (x2)
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 = $(this).data('lastTouch') || t2
, dt = t2 - t1
, fingers = e.originalEvent.touches.length;
$(this).data('lastTouch', t2);
if (!dt || dt > 200 || fingers > 1) return; // not double-tap
e.preventDefault(); // double tap - prevent the zoom
// also synthesize click events we just swallowed up
$(this).trigger('click').trigger('click');
});
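// In other words: two one-finger taps less than 200 ms apart are treated as a double tap - the
// default zoom is suppressed and the swallowed taps are re-emitted as plain click events.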
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySound = function() {
if ( DG.buttonEnable == true){
DG.sample.shootRound();
}
}
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySoundChangePlayer = function() {
DG.sampleChangePlayer.shootRound();
}
/*
* play sound when won
*/
DG.prototype.playSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.shootRound()
}
/*
* stop sound of winner
*/
DG.prototype.stopSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.stop()
}
/*
* display congratulations
*/
DG.prototype.endGame = function(){
$('.playerWinBackground').show();
DG.prototype.playSoundWin(DG.currentPlayer)
$('.playerWin').show();
//~ $(DG.playerResult).val('win');
DG.prototype.saveData();
DG.isEndGame = true;
if ('player1' == DG.currentPlayer){
DG.scoreP1 ++;
}else {
DG.scoreP2 ++;
}
$('.scorePlayer1')[0].innerText = DG.scoreP1;
$('.scorePlayer2')[0].innerText = DG.scoreP2;
if((DG.scoreP2 > 1)||(DG.scoreP1 > 1)){
$('.yes').hide();
$('.no').hide();
$('.gameStyle').hide();
$('.slash').hide();
$('.playAgain').show();
}
if((DG.scoreP2 + DG.scoreP1 == 2)){
// decider?
$('.gameStyle')[0].innerText = 'Decider ?';
}
if((DG.scoreP2 + DG.scoreP1 == 1)){
// revenge?
$('.gameStyle')[0].innerText = 'Revenge ?';
}
//~ DG.prototype.init();
}
/*
* save data with ajax
*/
DG.prototype.saveData = function(){
url="darts_save.php";
data = 'gameNumber=' + $(".gameNumber")[0].innerText;
data += '&' + DG.currentPlayer + '=' + DG.currentPlayerName;
data += '&keyPressed='+ (DG.multi * DG.keyMark);
data += '&scoreP1=' + $(".p1Result").val();
data += '&scoreP2=' + $(".p2Result").val();
dataType = 'json';
$.ajax({
type: "POST",
url: url,
data: data,
success: function (json) {
}
,
dataType: dataType
});
}
/*
* change the player
*/
DG.prototype.changePlayer = function(){
DG.buttonEnable =true;
DG.prototype.PlaySoundChangePlayer();
if ('player1' == DG.currentPlayer){
DG.currentPlayer = "player2";
DG.currentPlayerName = $(".player2Name")[0].innerText;
$(".p1Result").css({'background-color' : '#333333'});
$(".p2Result").css({'background-color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
}else {
DG.currentPlayer = "player1";
DG.currentPlayerName = $(".player1Name")[0].innerText;
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : '#333333'});
$("button").css({'background-color' : DG.p1Color});
}
DG.prototype.remainingDarts();
}
/*
* repeat a string
*/
DG.prototype.strRepeat = function(str, count){
strOut = ''
for (i=1; i<=count; i++){
strOut = strOut + str;
} | return strOut;
}
/*
* remaining darts
*/
DG.prototype.remainingDarts = function(){
DG.prototype.initDarts();
if ('player1' == DG.currentPlayer){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}else {
$(".numberPlayLeftP2")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}
} | random_line_split |
|
darts.js | /*
 * helps counting the score when playing darts
 * this is a game used to count darts when
 * you're playing real darts.
 * this is free to use and serves as a demo for the moment
 * please send mail to [email protected] if you need info
*
* Updated : 2017/02/04
*/
/*
* load game on document load
*/
$(document).ready(function() {
oDG = new DG()
oDG.initSounds();
oDG.buttonFactory();
oDG.init();
});
/*
* the dart game class
*/
function DG(){
DG.scoreP1 = 0;
DG.scoreP2 = 0;
DG.game = 301
}
/*
* initialize game variables
*/
DG.prototype.init = function(){
DG.buttonEnable = true;
DG.numberPlay = 3;
DG.isEndGame = false;
DG.player1 = true;
DG.multi = 1
DG.lastScore = 0;
DG.keyPressed = 0;
DG.currentPlayer = 'player1'
DG.currentPlayerName = $(".player1Name")[0].innerText;
DG.prototype.initColors()
DG.prototype.initWinnerButtons()
$('.p1Result').val(DG.game);
$('.p2Result').val(DG.game);
$('.playerWinBackground').hide();
$('.playerWin').hide();
}
/*
* initialize game Sounds
*/
DG.prototype.initSounds = function(){
DG.sample = new RapidSoundsSample('medias/dart2.mp3', 0.2);
DG.sampleError = new RapidSoundsSample('medias/error.wav', 0.2);
DG.sampleChangePlayer = new RapidSoundsSample('medias/changePlayer.mp3', 0.5);
DG.sampleplayer1 = new RapidSoundsSample('medias/testoo.mp3', 1);
DG.sampleplayer2 = new RapidSoundsSample('medias/rouge.low.mp3', 1);
}
/*
* button factory
*/
DG.prototype.buttonFactory = function(){
for (i=0; i<=20 ;i++){
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
}
i=25;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
i=50;
DG.prototype.createButton(i, 'keyPlayer', 'btn' + i);
for (i=1; i<=7 ;i++){
DG.prototype.createButtonEmpty(i, 'empty');
}
DG.prototype.createButton("x2", 'keyMulti', 2);
DG.prototype.createButton("x3", 'keyMulti', 3);
$( ".keyPlayer" ).on( "touchstart click", DG.prototype.minusResult );
$( ".keyMulti" ).on( "touchstart click", DG.prototype.multiScore );
$( ".startButton" ).on( "touchstart click", DG.prototype.savePlayers );
}
/*
* create button
*/
DG.prototype.createButton = function (i, buttonName = '', buttonId = ''){
newButtonWrapper = $("<span>").attr('class','button');
newButtonWrapper.attr('id','ok'+i);
newButtonWrapper.appendTo(".numbers");
newButton = $("<button>").attr('class',buttonName);
newButton.attr('id', buttonId);
newButton.appendTo("#ok"+i).html( i ).trigger('create');
$( "#"+buttonId ).on( "touchstart click", DG.prototype.PlaySound );
}
/*
* create empty button
*/
DG.prototype.createButtonEmpty = function (i,buttonName){
newButtonWrapper = $("<span>").attr('class','button ');
newButtonWrapper.attr('id',buttonName+i).appendTo(".numbers");
newButton = $("<button>").attr('disabled','true');
newButton.appendTo("#"+buttonName+i).html( " " );
}
/*
* initialize colors
*/
DG.prototype.initColors = function(){
DG.p1Color = '#333333'
DG.p2Color = '#cc0099'
DG.playerColor = DG.p1Color;
$('body').css({'background-color' : '#262626'})
$(".numberPlayLeftP1").css({'color' : 'white'});
$(".numberPlayLeftP2").css({'color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : DG.p1Color});
DG.prototype.initDarts(true);
}
/*
* return a full set of dart markers (three dots)
*/
DG.prototype.addFullDarts = function(){
return DG.prototype.strRepeat('.', 3);
}
/*
* init darts
*/
DG.prototype.initDarts = function(bBegin = false){
if(bBegin == true){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.addFullDarts();
}else{
$(".numberPlayLeftP1")[0].innerText = "";
}
$(".numberPlayLeftP2")[0].innerText = "";
}
/*
* initialize winner buttons
* - click on yes button on win screen
* - click on no button on win screen
* - click on play again button on win screen
*/
DG.prototype.initWinnerButtons = function(){
$('.yes').on( "click", function() {
DG.prototype.stopSoundWin(DG.currentPlayer);
DG.prototype.init();
});
$('.no').on( "click", function() {
window.location.replace('./')
});
$('.playAgain').on( "click touchstart", function() {
window.location.replace('./')
});
}
/*
* manage the button multiply
*/
DG.prototype.multiManage = function(){
$(".keyMulti").css({'background-color' : DG.p1Color});
}
/*
* manage click on x2 or x3 button
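* the multiplier is read back from the button id ("2" or "3") set up in buttonFactory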
*/
DG.prototype.multiScore = function(evt){
evt.preventDefault();
$(".keyMulti").css({'background-color' : DG.p1Color});
$(".keyMulti").css({'background-color' : DG.p1Color});
$(this).css({'background-color' : 'white'});
DG.multi = parseInt($(this)[0].id);
}
/*
* minus the score
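* applies the checkout rules: a remaining score of 1 or below zero is a bust,
* and reaching exactly 0 only wins when the last throw was a double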
*/
DG.prototype.minusResult = function(ev){
ev.preventDefault();
DG.lastMulti = 1
if ( DG.buttonEnable == true){
$(this).css({'background-color' : 'white'});
}else{
$(this).css({'background-color' : '#333333'});
return true;
}
if(true == DG.isEndGame) return false;
DG.prototype.multiManage();
// get the current pressed button score
DG.keyMark = $(this)[0].innerText;
if ('player1' == DG.currentPlayer){
DG.playerResult = ".p1Result";
}else{
DG.playerResult = ".p2Result";
}
if (DG.numberPlay == 3) |
DG.lastMulti = DG.multi
DG.result = $(DG.playerResult).val();
DG.result -= (DG.multi * DG.keyMark);
DG.prototype.saveData();
// initialize multi
DG.multi = 1
if (DG.result==1){
DG.prototype.endError()
return false;
}
if (DG.result == 0){
if(DG.lastMulti == 2){
DG.prototype.endGame();
}else{
DG.prototype.endError()
}
return true;
}else if (DG.result < 0 ){
DG.prototype.endError()
}else{
DG.numberPlay--;
if (DG.result > 0){
DG.prototype.initDarts();
if (DG.numberPlay == 0){
DG.prototype.remainingDarts();
DG.numberPlay=3;
DG.buttonEnable = false;
}else {
DG.prototype.remainingDarts();
}
// remaining darts
if ( DG.buttonEnable == false){
setTimeout(DG.prototype.changePlayer, 1000);
}else {
$(this).css({'background-color' : 'white'});
}
}
}
$(DG.playerResult).val(DG.result);
}
/*
* ucfirst the name
*/
DG.prototype.ucfirst = function(str) {
var f = str.charAt(0).toUpperCase();
return f + str.substr(1).toLowerCase();
}
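// e.g. DG.prototype.ucfirst('john') returns 'John'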
/*
* hide the splash screen
*/
DG.prototype.hideSplash = function(str) {
$('.playerStart').hide();
$('.start').hide();
}
/*
* save Player name
* hide splash
*/
DG.prototype.savePlayers = function(ev){
if($('.p1ResultBlock').val() =='') DG.p1Name = "Player 1"
else DG.p1Name = DG.prototype.ucfirst($('.p1ResultBlock').val());
if($('.p2ResultBlock').val() =='') DG.p2Name = "Player 2"
else DG.p2Name = DG.prototype.ucfirst($('.p2ResultBlock').val());
$(".player1Name")[0].innerText = DG.p1Name;
DG.currentPlayerName = DG.p1Name;
$(".player2Name")[0].innerText = DG.p2Name;
setTimeout(DG.prototype.hideSplash, 500);
}
/*
* change player when a bad score was done
* score = 1
* score < 0
* last score was not multiplied by 2
*/
DG.prototype.endError = function(){
DG.sampleError.shootRound();
DG.result=DG.lastScore;
DG.numberPlay = 3;
DG.prototype.initDarts();
DG.buttonEnable = false;
$(DG.playerResult).val(DG.result);
setTimeout(DG.prototype.changePlayer, 500);
}
/*
* avoid double tap zoom on ipad and iphone
*/
$(this).bind('touchstart', function preventZoom(e) {
var t2 = e.timeStamp
, t1 = $(this).data('lastTouch') || t2
, dt = t2 - t1
, fingers = e.originalEvent.touches.length;
$(this).data('lastTouch', t2);
if (!dt || dt > 200 || fingers > 1) return; // not double-tap
e.preventDefault(); // double tap - prevent the zoom
// also synthesize click events we just swallowed up
$(this).trigger('click').trigger('click');
});
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySound = function() {
if ( DG.buttonEnable == true){
DG.sample.shootRound();
}
}
/*
* play sound calling lib audio.js
*/
DG.prototype.PlaySoundChangePlayer = function() {
DG.sampleChangePlayer.shootRound();
}
/*
* play sound when won
*/
DG.prototype.playSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.shootRound()
}
/*
* stop sound of winner
*/
DG.prototype.stopSoundWin = function(currentPlayer) {
DG.playerSample = eval('DG.sample' + currentPlayer)
DG.playerSample.stop()
}
/*
* display congratulations
*/
DG.prototype.endGame = function(){
$('.playerWinBackground').show();
DG.prototype.playSoundWin(DG.currentPlayer)
$('.playerWin').show();
//~ $(DG.playerResult).val('win');
DG.prototype.saveData();
DG.isEndGame = true;
if ('player1' == DG.currentPlayer){
DG.scoreP1 ++;
}else {
DG.scoreP2 ++;
}
$('.scorePlayer1')[0].innerText = DG.scoreP1;
$('.scorePlayer2')[0].innerText = DG.scoreP2;
if((DG.scoreP2 > 1)||(DG.scoreP1 > 1)){
$('.yes').hide();
$('.no').hide();
$('.gameStyle').hide();
$('.slash').hide();
$('.playAgain').show();
}
if((DG.scoreP2 + DG.scoreP1 == 2)){
// decider?
$('.gameStyle')[0].innerText = 'Decider ?';
}
if((DG.scoreP2 + DG.scoreP1 == 1)){
// revenge?
$('.gameStyle')[0].innerText = 'Revenge ?';
}
//~ DG.prototype.init();
}
/*
* save data with ajax
*/
DG.prototype.saveData = function(){
url="darts_save.php";
data = 'gameNumber=' + $(".gameNumber")[0].innerText;
data += '&' + DG.currentPlayer + '=' + DG.currentPlayerName;
data += '&keyPressed='+ (DG.multi * DG.keyMark);
data += '&scoreP1=' + $(".p1Result").val();
data += '&scoreP2=' + $(".p2Result").val();
dataType = 'json';
$.ajax({
type: "POST",
url: url,
data: data,
success: function (json) {
}
,
dataType: dataType
});
}
/*
* change the player
*/
DG.prototype.changePlayer = function(){
DG.buttonEnable =true;
DG.prototype.PlaySoundChangePlayer();
if ('player1' == DG.currentPlayer){
DG.currentPlayer = "player2";
DG.currentPlayerName = $(".player2Name")[0].innerText;
$(".p1Result").css({'background-color' : '#333333'});
$(".p2Result").css({'background-color' : 'white'});
$("button").css({'background-color' : DG.p1Color});
}else {
DG.currentPlayer = "player1";
DG.currentPlayerName = $(".player1Name")[0].innerText;
$(".p1Result").css({'background-color' : 'white'});
$(".p2Result").css({'background-color' : '#333333'});
$("button").css({'background-color' : DG.p1Color});
}
DG.prototype.remainingDarts();
}
/*
* repeat a string
*/
DG.prototype.strRepeat = function(str, count){
strOut = ''
for (i=1; i<=count; i++){
strOut = strOut + str;
}
return strOut;
}
/*
* remaining darts
*/
DG.prototype.remainingDarts = function(){
DG.prototype.initDarts();
if ('player1' == DG.currentPlayer){
$(".numberPlayLeftP1")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}else {
$(".numberPlayLeftP2")[0].innerText = DG.prototype.strRepeat('.', DG.numberPlay);
}
}
| {
DG.lastScore = $(DG.playerResult).val() ;
} | conditional_block |
table.ts | /**
* Copyright 2021, Yahoo Holdings Inc.
* Licensed under the terms of the MIT license. See accompanying LICENSE.md file for terms.
*/
import VisualizationSerializer from './visualization';
import { parseMetricName, canonicalizeMetric } from 'navi-data/utils/metric';
import { assert } from '@ember/debug';
import { isEmpty } from '@ember/utils';
import type { RequestV2, Column, Parameters } from 'navi-data/adapters/facts/interface';
import type NaviMetadataService from 'navi-data/services/navi-metadata';
import { getRealDimensionType } from 'navi-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
*
* @param column the legacy column to read the field name and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
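// The three branches above cover the legacy field shapes: a canonical metric string
// parsed with parseMetricName, an object keyed by the column type, and an
// attributes-based column carrying `name` and `parameters`.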
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param naviMetadata - the metadata service with the datasource already loaded
*/
function injectDimensionFields(request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata |
export default class TableVisualizationSerializer extends VisualizationSerializer {
// TODO: Implement serialize method to strip out unneeded fields
}
// DO NOT DELETE: this is how TypeScript knows how to look up your models.
declare module 'ember-data/types/registries/serializer' {
export default interface SerializerRegistry {
table: TableVisualizationSerializer;
}
}
| {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
}
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
// extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo;
if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals } = visualization?.metadata || {};
let subtotal;
if (showTotals?.subtotal) {
let canonicalName;
if (showTotals?.subtotal === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.dateTime(grain=${grain})`;
} else {
const dimensionField = request.columns.find((c) => c.field === showTotals.subtotal)?.parameters.field;
canonicalName = `${showTotals.subtotal}(field=${dimensionField})`;
}
subtotal = columnData[canonicalName].requestColumn?.cid;
}
return {
type: 'table',
version: 2,
metadata: {
columnAttributes,
showTotals: {
...(subtotal !== undefined ? { subtotal } : {}),
...(showTotals?.grandTotal !== undefined ? { grandTotal: showTotals?.grandTotal } : {}),
},
},
};
} | identifier_body |
table.ts | /**
* Copyright 2021, Yahoo Holdings Inc.
* Licensed under the terms of the MIT license. See accompanying LICENSE.md file for terms.
*/
import VisualizationSerializer from './visualization';
import { parseMetricName, canonicalizeMetric } from 'navi-data/utils/metric';
import { assert } from '@ember/debug';
import { isEmpty } from '@ember/utils';
import type { RequestV2, Column, Parameters } from 'navi-data/adapters/facts/interface';
import type NaviMetadataService from 'navi-data/services/navi-metadata';
import { getRealDimensionType } from 'navi-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
*
* @param column the legacy column to read the field name and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param naviMetadata - the metadata service with the datasource already loaded
*/
function injectDimensionFields(request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) |
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
// extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo;
if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals } = visualization?.metadata || {};
let subtotal;
if (showTotals?.subtotal) {
let canonicalName;
if (showTotals?.subtotal === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.dateTime(grain=${grain})`;
} else {
const dimensionField = request.columns.find((c) => c.field === showTotals.subtotal)?.parameters.field;
canonicalName = `${showTotals.subtotal}(field=${dimensionField})`;
}
subtotal = columnData[canonicalName].requestColumn?.cid;
}
return {
type: 'table',
version: 2,
metadata: {
columnAttributes,
showTotals: {
...(subtotal !== undefined ? { subtotal } : {}),
...(showTotals?.grandTotal !== undefined ? { grandTotal: showTotals?.grandTotal } : {}),
},
},
};
}
export default class TableVisualizationSerializer extends VisualizationSerializer {
// TODO: Implement serialize method to strip out unneeded fields
}
// DO NOT DELETE: this is how TypeScript knows how to look up your models.
declare module 'ember-data/types/registries/serializer' {
export default interface SerializerRegistry {
table: TableVisualizationSerializer;
}
}
| {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
} | conditional_block |
table.ts | /**
* Copyright 2021, Yahoo Holdings Inc.
* Licensed under the terms of the MIT license. See accompanying LICENSE.md file for terms.
*/
import VisualizationSerializer from './visualization';
import { parseMetricName, canonicalizeMetric } from 'navi-data/utils/metric';
import { assert } from '@ember/debug';
import { isEmpty } from '@ember/utils';
import type { RequestV2, Column, Parameters } from 'navi-data/adapters/facts/interface';
import type NaviMetadataService from 'navi-data/services/navi-metadata';
import { getRealDimensionType } from 'navi-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
*
* @param column the legacy column to read the field name and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param naviMetadata - the metadata service with the datasource already loaded
*/
function | (request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
}
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
// extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo;
if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals } = visualization?.metadata || {};
let subtotal;
if (showTotals?.subtotal) {
let canonicalName;
if (showTotals?.subtotal === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.dateTime(grain=${grain})`;
} else {
const dimensionField = request.columns.find((c) => c.field === showTotals.subtotal)?.parameters.field;
canonicalName = `${showTotals.subtotal}(field=${dimensionField})`;
}
subtotal = columnData[canonicalName].requestColumn?.cid;
}
return {
type: 'table',
version: 2,
metadata: {
columnAttributes,
showTotals: {
...(subtotal !== undefined ? { subtotal } : {}),
...(showTotals?.grandTotal !== undefined ? { grandTotal: showTotals?.grandTotal } : {}),
},
},
};
}
export default class TableVisualizationSerializer extends VisualizationSerializer {
// TODO: Implement serialize method to strip out unneeded fields
}
// DO NOT DELETE: this is how TypeScript knows how to look up your models.
declare module 'ember-data/types/registries/serializer' {
export default interface SerializerRegistry {
table: TableVisualizationSerializer;
}
}
| injectDimensionFields | identifier_name |
table.ts | /**
* Copyright 2021, Yahoo Holdings Inc.
* Licensed under the terms of the MIT license. See accompanying LICENSE.md file for terms.
*/
import VisualizationSerializer from './visualization';
import { parseMetricName, canonicalizeMetric } from 'navi-data/utils/metric';
import { assert } from '@ember/debug';
import { isEmpty } from '@ember/utils';
import type { RequestV2, Column, Parameters } from 'navi-data/adapters/facts/interface';
import type NaviMetadataService from 'navi-data/services/navi-metadata';
import { getRealDimensionType } from 'navi-core/utils/request';
interface FieldTypes {
metric: 'metric';
threshold: 'metric';
dimension: 'dimension';
dateTime: 'dateTime';
}
type LegacyType = keyof FieldTypes;
type LegacyFieldType = FieldTypes[LegacyType];
interface LegacyColumn<K extends LegacyType> {
type: K;
field?: string | ({ [T in FieldTypes[K]]: string } & { parameters?: Parameters });
displayName?: string;
format?: string;
hasCustomDisplayName?: boolean;
sortDirection?: string;
attributes?: {
canAggregateSubtotal?: boolean;
field?: string;
format?: string;
name?: string;
parameters?: Parameters;
};
}
type LegacyMetadataColumn = LegacyColumn<'metric'> | LegacyColumn<'dateTime'> | LegacyColumn<'dimension'>;
export type TableVisMetadataPayloadV1 = {
type: 'table';
version: 1;
metadata: {
columns: (LegacyMetadataColumn | LegacyColumn<'threshold'>)[];
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
export interface TableColumnAttributes {
canAggregateSubtotal?: boolean;
format?: string;
}
export type TableVisualizationMetadata = {
type: 'table';
version: 2;
metadata: {
columnAttributes: {
[cid: string]: TableColumnAttributes | undefined;
};
showTotals?: {
subtotal?: string;
grandTotal?: boolean;
};
};
};
type Field = { field: string; parameters: Parameters };
/**
*
* @param column the legacy column to read the field name and parameters from
*/
function getColumnField(column: LegacyColumn<LegacyFieldType>): Field {
if (typeof column.field === 'string') {
let { metric, parameters } = parseMetricName(column.field);
return {
field: metric,
parameters: parameters || {},
};
} else if (typeof column.field === 'object') {
return {
field: column.field[column.type],
parameters: column.field.parameters || {},
};
} else {
return {
field: column.attributes?.name as string,
parameters: column.attributes?.parameters || {},
};
}
}
type ColumnInfo =
| { requestIndex: number; requestColumn: Column; tableIndex: number; tableColumn: LegacyMetadataColumn }
| {
// These could be undefined if the table was not updated properly and tried to display a column that no longer existed
requestIndex: undefined;
requestColumn: undefined;
tableIndex: number;
tableColumn: LegacyMetadataColumn;
}
| {
requestIndex: number;
requestColumn: Column;
// These can be undefined since converting an all grain request will add a request column with no corresponding table column
tableIndex: undefined;
tableColumn: undefined;
};
/**
* Builds a map of column canonical name to both request and visualization data
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
function buildColumnInfo(
request: RequestV2,
visualization: TableVisMetadataPayloadV1,
naviMetadata: NaviMetadataService
): Record<string, ColumnInfo> {
const columnData: Record<string, ColumnInfo> = {};
request.columns.forEach((column, index) => {
const { field, parameters } = column;
const canonicalName = canonicalizeMetric({ metric: field, parameters });
const data = columnData[canonicalName] || {};
data.requestIndex = index;
data.requestColumn = column;
columnData[canonicalName] = data;
});
visualization?.metadata?.columns.forEach((column, index) => {
column.type = column.type === 'threshold' ? 'metric' : column.type;
const newCol = column as LegacyColumn<LegacyFieldType>;
const { field, parameters } = getColumnField(newCol);
let canonicalName = canonicalizeMetric({ metric: field, parameters });
if (newCol.type === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.${canonicalName}(grain=${grain})`;
} else if (newCol.type === 'dimension') {
const type = getRealDimensionType(field, request.dataSource, naviMetadata);
const requestColumn = request.columns.find((c) => c.type === type && c.field === field);
const fieldParam = column.attributes?.field ?? requestColumn?.parameters.field;
assert(`field param must be found for dimension ${canonicalName}`, fieldParam);
const newParams = {
...requestColumn?.parameters,
field: fieldParam,
};
canonicalName = canonicalizeMetric({ metric: field, parameters: newParams });
}
const data = columnData[canonicalName] || {};
data.tableIndex = index;
data.tableColumn = newCol;
columnData[canonicalName] = data;
});
return columnData;
}
/**
* The legacy table visualization would display by checking the following rules
* - 1) If the dimension had `show` fields -> make them individual columns (e.g. [Dim (key), Dim (desc)])
* - 2) If the desc field was available -> show it (with id only on hover)
* - 3) If the id field was available -> show it
* @param request - the requested data for this table
* @param naviMetadata - the metadata service with the datasource already loaded
*/
function injectDimensionFields(request: RequestV2, naviMetadata: NaviMetadataService) {
const newColumns: RequestV2['columns'] = [];
request.columns.forEach((col) => {
const { type, field } = col;
if (type === 'dimension') {
const dimMeta = naviMetadata.getById(type, field, request.dataSource);
// get all show fields for dimension
let showFields = dimMeta?.getFieldsForTag('show').map((f) => f.name) ?? [];
if (showFields.length === 0) {
const allFields = dimMeta?.fields?.map((f) => f.name);
let bestField: string;
if (allFields) {
// Use desc or id if available. If neither match grab the first field
bestField = ['desc', 'id'].find((idOrDesc) => allFields.includes(idOrDesc)) ?? allFields[0];
} else {
bestField = 'desc'; // default to desc
}
showFields = [bestField];
}
showFields.forEach((field) => {
newColumns.push({
...col,
parameters: {
...col.parameters,
field,
},
});
});
} else {
newColumns.push(col);
}
});
request.columns = newColumns;
}
/**
* Normalizes a table visualization by
* - applying table order to request columns
* - moving custom display names to aliases
* - migrates existing attributes to a map based on column id
* - moves subtotal to use column id
* @param request the requested data for this table
* @param visualization the existing visualization metadata
*/
export function normalizeTableV2(
request: RequestV2,
visualization: TableVisMetadataPayloadV1 | TableVisualizationMetadata,
naviMetadata: NaviMetadataService
): TableVisualizationMetadata {
if (visualization.version === 2) {
return visualization;
}
injectDimensionFields(request, naviMetadata);
const columnData: Record<string, ColumnInfo> = buildColumnInfo(request, visualization, naviMetadata);
// Rearranges request columns to match table order
const missedRequestColumns: Column[] = [];
const reorderedColumns = Object.values(columnData)
.reduce((columns: Column[], columnInfo) => {
const { tableColumn, tableIndex, requestColumn } = columnInfo;
if (requestColumn && tableColumn) {
// this column exists in request and table
assert('Table index must exist if table column exists', tableIndex !== undefined);
if (tableColumn.hasCustomDisplayName) {
// If display name is custom move over to request
requestColumn.alias = tableColumn.displayName;
}
columns[tableIndex] = requestColumn;
} else if (requestColumn !== undefined && tableColumn === undefined) {
// this column only exists in the request
missedRequestColumns.push(requestColumn);
}
return columns;
}, [])
.filter((c) => c); // remove skipped columns
request.columns = [...reorderedColumns, ...missedRequestColumns];
| if (tableColumn === undefined || requestColumn === undefined) {
// this column does not exist in the table
return columns;
}
const { attributes } = tableColumn;
assert(
`The request column ${requestColumn.field} should have a present 'cid' field`,
requestColumn.cid !== undefined
);
const canAggregateSubtotal = tableColumn.type === 'metric' ? attributes?.canAggregateSubtotal : undefined;
const format = tableColumn.format !== undefined ? tableColumn.format : attributes?.format;
columns[requestColumn.cid] = {
...(canAggregateSubtotal !== undefined ? { canAggregateSubtotal } : {}),
...(!isEmpty(format) ? { format } : {}),
};
return columns;
}, {} as Record<string, TableColumnAttributes>);
// update subtotal to use column index
const { showTotals } = visualization?.metadata || {};
let subtotal;
if (showTotals?.subtotal) {
let canonicalName;
if (showTotals?.subtotal === 'dateTime') {
const { table } = request;
const grain = request.columns.find((c) => c.field === `${table}.dateTime`)?.parameters.grain;
canonicalName = `${table}.dateTime(grain=${grain})`;
} else {
const dimensionField = request.columns.find((c) => c.field === showTotals.subtotal)?.parameters.field;
canonicalName = `${showTotals.subtotal}(field=${dimensionField})`;
}
subtotal = columnData[canonicalName].requestColumn?.cid;
}
return {
type: 'table',
version: 2,
metadata: {
columnAttributes,
showTotals: {
...(subtotal !== undefined ? { subtotal } : {}),
...(showTotals?.grandTotal !== undefined ? { grandTotal: showTotals?.grandTotal } : {}),
},
},
};
}
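// Usage sketch (illustrative caller, not part of this module): given a saved report's
// RequestV2 and its legacy table visualization payload,
//   const normalized = normalizeTableV2(request, visualization, naviMetadata);
// yields version-2 metadata whose columnAttributes are keyed by the request columns' cids.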
export default class TableVisualizationSerializer extends VisualizationSerializer {
// TODO: Implement serialize method to strip out unneeded fields
}
// DO NOT DELETE: this is how TypeScript knows how to look up your models.
declare module 'ember-data/types/registries/serializer' {
export default interface SerializerRegistry {
table: TableVisualizationSerializer;
}
} | // extract column attributes
const columnAttributes = Object.values(columnData).reduce((columns, columnInfo) => {
const { tableColumn, requestColumn } = columnInfo; | random_line_split |
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
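// e.g. `SampleCount::from(4)` yields `SampleCount::Msaa4x`; any count other than 1 or 4 panics.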
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else |
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
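/// Reinterprets a slice of `T` as raw bytes for buffer creation. This assumes `T` is
/// plain-old-data with a stable layout (the role a crate like bytemuck would otherwise fill).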
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
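// Minimal wiring sketch (the redraw body is a placeholder for caller-supplied rendering):
//   let main_loop = MainLoop::new("demo");
//   let gpu = Gpu::new(main_loop.window());
//   main_loop.run(move |_dt, _window, event, _flow| {
//       if let Event::RedrawRequested(_) = event {
//           if let Ok(resources) = gpu.create_render_pass_resources() {
//               // record render passes with resources.command_encoder, then submit on gpu.queue()
//           }
//       }
//   });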
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(dbg!(duration_since_last_update), &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
};
| {
SampleCount::Single
} | conditional_block |
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
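    // Returns the view to render into plus an optional resolve target: with MSAA enabled,
    // drawing goes into the multisampled frame buffer and is resolved into the surface
    // texture; without MSAA, drawing targets the surface texture directly.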
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn | (window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
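    // SAFETY: this view reinterprets the slice's memory as raw bytes, which is only sound
    // for plain-old-data element types (no references, no Drop, no padding bytes). The
    // callers pass simple vertex/index/uniform data; a crate such as `bytemuck` offers a
    // checked alternative to this manual cast.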
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
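    // Uploads an RGBA8 image into a new texture; bytes_per_row is width * 4
    // (dimensions.0 << 2) because each Rgba8UnormSrgb texel occupies four bytes.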
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(dbg!(duration_since_last_update), &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
};
| new | identifier_name |
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
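    // Returns the view to render into plus an optional resolve target: with MSAA enabled,
    // drawing goes into the multisampled frame buffer and is resolved into the surface
    // texture; without MSAA, drawing targets the surface texture directly.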
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
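    // SAFETY: this view reinterprets the slice's memory as raw bytes, which is only sound
    // for plain-old-data element types (no references, no Drop, no padding bytes). The
    // callers pass simple vertex/index/uniform data; a crate such as `bytemuck` offers a
    // checked alternative to this manual cast.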
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
fn create_buffer<T>(
&self,
label: &str,
contents: &[T],
usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
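    // Uploads an RGBA8 image into a new texture; bytes_per_row is width * 4
    // (dimensions.0 << 2) because each Rgba8UnormSrgb texel occupies four bytes.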
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState |
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(dbg!(duration_since_last_update), &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
};
| {
self.surface_handler.multisample_state()
} | identifier_body |
lib.rs | pub use glam::*;
use image::DynamicImage;
pub use std::time;
pub use wgpu::util::DeviceExt;
use wgpu::ShaderModule;
pub use winit::{
dpi::{PhysicalSize, Size},
event::{Event, *},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowAttributes},
};
pub type Index = u16;
#[repr(u32)]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum SampleCount {
Single = 1,
Msaa4x = 4,
}
impl From<u32> for SampleCount {
fn from(sample_count: u32) -> Self {
match sample_count {
1 => Self::Single,
4 => Self::Msaa4x,
_ => panic!("a sample count of {} is invalid", sample_count),
}
}
}
pub struct FrameBuffer {
texture: wgpu::Texture,
sample_count: u32,
}
impl FrameBuffer {
pub fn new(device: &wgpu::Device, config: &SurfaceHandlerConfiguration) -> Self {
let sample_count = config.sample_count as u32;
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Mutlisampled Texture"),
sample_count,
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
});
Self {
texture,
sample_count,
}
}
pub fn create_view(&self) -> wgpu::TextureView {
self.texture.create_view(&Default::default())
}
}
pub struct SurfaceHandler {
surface: wgpu::Surface,
surface_configuration: wgpu::SurfaceConfiguration,
frame_buffer: Option<FrameBuffer>,
}
pub struct SurfaceHandlerConfiguration {
pub width: u32,
pub height: u32,
pub sample_count: SampleCount,
}
impl SurfaceHandler {
pub fn multisample_state(&self) -> wgpu::MultisampleState {
wgpu::MultisampleState {
count: self.sample_count() as u32,
..Default::default()
}
}
pub fn sample_count(&self) -> SampleCount {
if let Some(FrameBuffer { sample_count, .. }) = self.frame_buffer {
sample_count.into()
} else {
SampleCount::Single
}
}
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.configure(
&device,
&SurfaceHandlerConfiguration {
width,
height,
sample_count: self.sample_count(),
},
);
}
pub fn configure(&mut self, device: &wgpu::Device, config: &SurfaceHandlerConfiguration) {
self.surface_configuration = wgpu::SurfaceConfiguration {
width: config.width,
height: config.height,
..self.surface_configuration
};
self.surface.configure(&device, &self.surface_configuration);
match config.sample_count {
SampleCount::Single => {
self.frame_buffer = None;
}
SampleCount::Msaa4x => self.frame_buffer = Some(FrameBuffer::new(&device, &config)),
}
}
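    // Returns the view to render into plus an optional resolve target: with MSAA enabled,
    // drawing goes into the multisampled frame buffer and is resolved into the surface
    // texture; without MSAA, drawing targets the surface texture directly.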
pub fn create_view_and_resolve_target(
&self,
surface_texture: &wgpu::SurfaceTexture,
) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
let surface_texture_view = surface_texture.texture.create_view(&Default::default());
if let Some(ref frame_buffer) = self.frame_buffer {
(frame_buffer.create_view(), Some(surface_texture_view))
} else {
(surface_texture_view, None)
}
}
}
pub struct Gpu {
device: wgpu::Device,
queue: wgpu::Queue,
surface_handler: SurfaceHandler,
}
impl Gpu {
pub fn new(window: &Window) -> Self {
pollster::block_on(Self::new_async(window))
}
pub async fn new_async(window: &Window) -> Self {
let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
compatible_surface: Some(&surface),
power_preference: wgpu::PowerPreference::HighPerformance,
})
.await
.expect("request adapter error");
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor::default(), None)
.await
.expect("request device error");
let preferred_texture_format = surface
.get_preferred_format(&adapter)
.expect("get preferred format error");
let window_size = window.inner_size();
let surface_configuration = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: preferred_texture_format,
width: window_size.width,
height: window_size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &surface_configuration);
let frame_buffer = FrameBuffer::new(
&device,
&SurfaceHandlerConfiguration {
width: window_size.width,
height: window_size.height,
sample_count: SampleCount::Msaa4x,
},
);
let surface_handler = SurfaceHandler {
surface,
surface_configuration,
frame_buffer: Some(frame_buffer),
};
Self {
device,
queue,
surface_handler,
}
}
pub fn resize_surface(&mut self, width: u32, height: u32) {
self.surface_handler.resize(&self.device, width, height);
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
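    // SAFETY: this view reinterprets the slice's memory as raw bytes, which is only sound
    // for plain-old-data element types (no references, no Drop, no padding bytes). The
    // callers pass simple vertex/index/uniform data; a crate such as `bytemuck` offers a
    // checked alternative to this manual cast.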
fn as_buffer_contents<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(
slice.as_ptr() as *const u8,
std::mem::size_of::<T>() * slice.len(),
)
}
}
| usage: wgpu::BufferUsages,
) -> wgpu::Buffer {
self.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(label),
contents: Self::as_buffer_contents(contents),
usage,
})
}
pub fn create_index_buffer(&self, contents: &[Index]) -> wgpu::Buffer {
self.create_buffer(
"Index Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::INDEX,
)
}
pub fn create_vertex_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Vertex Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::VERTEX,
)
}
pub fn create_uniform_buffer<T>(&self, contents: &[T]) -> wgpu::Buffer {
self.create_buffer(
"Uniform Buffer",
Self::as_buffer_contents(contents),
wgpu::BufferUsages::UNIFORM,
)
}
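    // Uploads an RGBA8 image into a new texture; bytes_per_row is width * 4
    // (dimensions.0 << 2) because each Rgba8UnormSrgb texel occupies four bytes.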
pub fn create_texture_from_image(&self, image: DynamicImage) -> wgpu::Texture {
use image::GenericImageView;
let image_buffer = image.as_rgba8().expect("image format error");
let dimensions = image.dimensions();
let texture_extent_3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth_or_array_layers: 1,
};
let texture = self.device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: texture_extent_3d,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
self.queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
image_buffer,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(dimensions.0 << 2),
rows_per_image: std::num::NonZeroU32::new(dimensions.1),
},
texture_extent_3d,
);
texture
}
pub fn preferred_texture_format(&self) -> wgpu::TextureFormat {
self.surface_handler.surface_configuration.format
}
pub fn multisample_state(&self) -> wgpu::MultisampleState {
self.surface_handler.multisample_state()
}
pub fn create_render_pass_resources(&self) -> Result<RenderPassResources, wgpu::SurfaceError> {
Ok(RenderPassResources {
command_encoder: self.device.create_command_encoder(&Default::default()),
surface_texture: self.surface_handler.surface.get_current_frame()?.output,
gpu: &self,
})
}
}
pub struct RenderPassResources<'a> {
pub command_encoder: wgpu::CommandEncoder,
surface_texture: wgpu::SurfaceTexture,
gpu: &'a Gpu,
}
impl RenderPassResources<'_> {
pub fn create_view_and_resolve_target(&self) -> (wgpu::TextureView, Option<wgpu::TextureView>) {
self.gpu
.surface_handler
.create_view_and_resolve_target(&self.surface_texture)
}
}
pub struct MainLoop {
event_loop: EventLoop<()>,
window: Window,
}
impl MainLoop {
pub fn new(title: &str) -> MainLoop {
let event_loop = EventLoop::new();
let mut window_builder = winit::window::WindowBuilder::new();
window_builder.window = WindowAttributes {
title: title.to_owned(),
min_inner_size: Some(Size::Physical(PhysicalSize {
width: 16,
height: 16,
})),
inner_size: Some(Size::Physical(PhysicalSize {
width: 16 * 2u32.pow(6),
height: 9 * 2u32.pow(6),
})),
..Default::default()
};
let window = window_builder.build(&event_loop).unwrap();
Self { event_loop, window }
}
pub fn window(&self) -> &Window {
&self.window
}
const DURATION_500MS: time::Duration = time::Duration::from_millis(500);
pub fn run(
self,
mut event_handler: impl 'static + FnMut(time::Duration, &Window, Event<()>, &mut ControlFlow),
) -> ! {
let mut last_update_instant = time::Instant::now();
let mut last_fps_update_instant = time::Instant::now();
let mut update_count = 0u32;
let event_loop = self.event_loop;
let window = self.window;
event_loop.run(move |event, _, control_flow| {
let now = time::Instant::now();
let duration_since_last_update = now.duration_since(last_update_instant);
match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::MainEventsCleared => {
last_update_instant = now;
let duration_since_last_fps_update =
now.duration_since(last_fps_update_instant);
if duration_since_last_fps_update > Self::DURATION_500MS {
// print!(
// "\r{: >12} fps",
// update_count as f32 / duration_since_last_fps_update.as_secs_f32(),
// );
// use std::io::Write;
// std::io::stdout().flush().unwrap_or(());
last_fps_update_instant = now;
update_count = 0;
}
window.request_redraw();
update_count += 1;
}
_ => {}
}
event_handler(dbg!(duration_since_last_update), &window, event, control_flow);
})
}
}
#[macro_export]
macro_rules! size_of {
($T:ty) => {
std::mem::size_of::<$T>()
};
}
pub struct RenderBundle {
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
#[macro_export]
macro_rules! include_image {
($file:expr $(,)?) => {
image::load_from_memory(include_bytes!($file)).expect("load image error")
};
}
pub const ALPHA_BLEND_STATE: Option<wgpu::BlendState> = Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
});
pub const CLEAR_WHITE_OPERATIONS: wgpu::Operations<wgpu::Color> = wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
}; | fn create_buffer<T>(
&self,
label: &str,
contents: &[T], | random_line_split |
cloudLibUtils.js | /**
* Copyright 2023 F5 Networks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const fs = require('fs');
const semver = require('semver');
const promiseUtil = require('@f5devcentral/atg-shared-utilities').promiseUtils;
const log = require('../log');
const util = require('./util');
const iappUtil = require('./iappUtil');
const constants = require('../constants');
const DEVICE_TYPES = require('../constants').DEVICE_TYPES;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) {
throw new Error(`${failureMessage}: ${response.statusCode}`);
}
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
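// iControl LX package operations are asynchronous: the POST to
// /mgmt/shared/iapp/package-management-tasks returns a task id, and the result is read by
// polling GET .../package-management-tasks/<id> until the task status is FINISHED.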
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
        log.debug('cloudLibUtils.getRPMInfo: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function | (context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
    // We can't install SD when running in a container on startup (no target),
    // but we still need to when the install is triggered by a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug(`Versions match: ${result}`);
return result;
});
}
function ensureInstall(context) {
return getIsInstalled(context)
.then((isInstalled) => (isInstalled ? Promise.resolve() : install(context)));
}
function ensureUninstall(context) {
return getDiscoveryRpm(context, 'packageName')
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName));
}
function cleanupStoredDecl(context) {
if (context.target.deviceType !== DEVICE_TYPES.BIG_IP || context.host.buildType !== BUILD_TYPES.CLOUD) {
const message = 'cleanupStoredDecl can only be called when AS3 is running on a bigip!';
log.error(message);
throw new Error(message);
}
const cmd = `rm -f ${constants.encryptedDeclLocation} ${constants.encryptedDeclCounter}`;
return util.executeBashCommandExec(cmd)
.catch((error) => {
            log.error(`An error occurred while deleting stored declaration: ${error}`);
});
}
let IS_AVAILABLE;
function getIsAvailable() {
if (typeof IS_AVAILABLE !== 'undefined') {
return Promise.resolve(IS_AVAILABLE);
}
return new Promise((resolve) => {
fs.access(SOURCE_PATH, fs.R_OK, (error) => {
if (error) {
log.debug(`cloud-lib directory ${SOURCE_PATH} not found/readable`);
resolve(false);
return;
}
resolve(true);
});
})
.then((isAvailable) => {
IS_AVAILABLE = isAvailable;
return isAvailable;
});
}
function decryptFromRemote(context, secret) {
const postOptions = {
path: '/mgmt/shared/service-discovery/encryption',
method: 'POST',
send: JSON.stringify({
action: 'decrypt',
data: secret
})
};
return util.iControlRequest(context, postOptions)
.then((response) => response.result)
.catch((e) => {
e.message = `Failed decrypting cloud credentials: ${e.message}`;
throw e;
});
}
module.exports = {
checkVersions,
getIsAvailable,
getIsInstalled,
install,
ensureInstall,
ensureUninstall,
cleanupStoredDecl,
needCloudLibsInstall,
getDiscoveryRpm,
uninstallDiscoveryRpm,
decryptFromRemote,
waitForDiscoveryInit
};
| uninstallDiscoveryRpm | identifier_name |
cloudLibUtils.js | /**
* Copyright 2023 F5 Networks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const fs = require('fs');
const semver = require('semver');
const promiseUtil = require('@f5devcentral/atg-shared-utilities').promiseUtils;
const log = require('../log');
const util = require('./util');
const iappUtil = require('./iappUtil');
const constants = require('../constants');
const DEVICE_TYPES = require('../constants').DEVICE_TYPES;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) {
throw new Error(`${failureMessage}: ${response.statusCode}`);
}
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
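// iControl LX package operations are asynchronous: the POST to
// /mgmt/shared/iapp/package-management-tasks returns a task id, and the result is read by
// polling GET .../package-management-tasks/<id> until the task status is FINISHED.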
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
        log.debug('cloudLibUtils.getRPMInfo: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function uninstallDiscoveryRpm(context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
    // We can't install SD when running in a container on startup (no target),
    // but we still need to when the install is triggered by a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug(`Versions match: ${result}`);
return result;
});
}
function ensureInstall(context) |
function ensureUninstall(context) {
return getDiscoveryRpm(context, 'packageName')
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName));
}
function cleanupStoredDecl(context) {
if (context.target.deviceType !== DEVICE_TYPES.BIG_IP || context.host.buildType !== BUILD_TYPES.CLOUD) {
const message = 'cleanupStoredDecl can only be called when AS3 is running on a bigip!';
log.error(message);
throw new Error(message);
}
const cmd = `rm -f ${constants.encryptedDeclLocation} ${constants.encryptedDeclCounter}`;
return util.executeBashCommandExec(cmd)
.catch((error) => {
            log.error(`An error occurred while deleting stored declaration: ${error}`);
});
}
let IS_AVAILABLE;
function getIsAvailable() {
if (typeof IS_AVAILABLE !== 'undefined') {
return Promise.resolve(IS_AVAILABLE);
}
return new Promise((resolve) => {
fs.access(SOURCE_PATH, fs.R_OK, (error) => {
if (error) {
log.debug(`cloud-lib directory ${SOURCE_PATH} not found/readable`);
resolve(false);
return;
}
resolve(true);
});
})
.then((isAvailable) => {
IS_AVAILABLE = isAvailable;
return isAvailable;
});
}
function decryptFromRemote(context, secret) {
const postOptions = {
path: '/mgmt/shared/service-discovery/encryption',
method: 'POST',
send: JSON.stringify({
action: 'decrypt',
data: secret
})
};
return util.iControlRequest(context, postOptions)
.then((response) => response.result)
.catch((e) => {
e.message = `Failed decrypting cloud credentials: ${e.message}`;
throw e;
});
}
module.exports = {
checkVersions,
getIsAvailable,
getIsInstalled,
install,
ensureInstall,
ensureUninstall,
cleanupStoredDecl,
needCloudLibsInstall,
getDiscoveryRpm,
uninstallDiscoveryRpm,
decryptFromRemote,
waitForDiscoveryInit
};
| {
return getIsInstalled(context)
.then((isInstalled) => (isInstalled ? Promise.resolve() : install(context)));
} | identifier_body |
cloudLibUtils.js | /**
* Copyright 2023 F5 Networks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const fs = require('fs');
| const log = require('../log');
const util = require('./util');
const iappUtil = require('./iappUtil');
const constants = require('../constants');
const DEVICE_TYPES = require('../constants').DEVICE_TYPES;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) {
throw new Error(`${failureMessage}: ${response.statusCode}`);
}
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
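// iControl LX package operations are asynchronous: the POST to
// /mgmt/shared/iapp/package-management-tasks returns a task id, and the result is read by
// polling GET .../package-management-tasks/<id> until the task status is FINISHED.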
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.getRPMName: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function uninstallDiscoveryRpm(context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
    // We can't install SD when running in a container on startup (there is no target),
    // but we still need to install it when the check happens during a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug(`Versions match: ${result}`);
return result;
});
}
function ensureInstall(context) {
return getIsInstalled(context)
.then((isInstalled) => (isInstalled ? Promise.resolve() : install(context)));
}
function ensureUninstall(context) {
return getDiscoveryRpm(context, 'packageName')
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName));
}
function cleanupStoredDecl(context) {
if (context.target.deviceType !== DEVICE_TYPES.BIG_IP || context.host.buildType !== BUILD_TYPES.CLOUD) {
const message = 'cleanupStoredDecl can only be called when AS3 is running on a bigip!';
log.error(message);
throw new Error(message);
}
const cmd = `rm -f ${constants.encryptedDeclLocation} ${constants.encryptedDeclCounter}`;
return util.executeBashCommandExec(cmd)
.catch((error) => {
            log.error(`An error occurred while deleting stored declaration: ${error}`);
});
}
let IS_AVAILABLE;
function getIsAvailable() {
if (typeof IS_AVAILABLE !== 'undefined') {
return Promise.resolve(IS_AVAILABLE);
}
return new Promise((resolve) => {
fs.access(SOURCE_PATH, fs.R_OK, (error) => {
if (error) {
log.debug(`cloud-lib directory ${SOURCE_PATH} not found/readable`);
resolve(false);
return;
}
resolve(true);
});
})
.then((isAvailable) => {
IS_AVAILABLE = isAvailable;
return isAvailable;
});
}
function decryptFromRemote(context, secret) {
const postOptions = {
path: '/mgmt/shared/service-discovery/encryption',
method: 'POST',
send: JSON.stringify({
action: 'decrypt',
data: secret
})
};
return util.iControlRequest(context, postOptions)
.then((response) => response.result)
.catch((e) => {
e.message = `Failed decrypting cloud credentials: ${e.message}`;
throw e;
});
}
module.exports = {
checkVersions,
getIsAvailable,
getIsInstalled,
install,
ensureInstall,
ensureUninstall,
cleanupStoredDecl,
needCloudLibsInstall,
getDiscoveryRpm,
uninstallDiscoveryRpm,
decryptFromRemote,
waitForDiscoveryInit
}; | const semver = require('semver');
const promiseUtil = require('@f5devcentral/atg-shared-utilities').promiseUtils; | random_line_split |
cloudLibUtils.js | /**
* Copyright 2023 F5 Networks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
const fs = require('fs');
const semver = require('semver');
const promiseUtil = require('@f5devcentral/atg-shared-utilities').promiseUtils;
const log = require('../log');
const util = require('./util');
const iappUtil = require('./iappUtil');
const constants = require('../constants');
const DEVICE_TYPES = require('../constants').DEVICE_TYPES;
const BUILD_TYPES = require('../constants').BUILD_TYPES;
const SOURCE_PATH = '/var/config/rest/iapps/f5-appsvcs/packages';
const IAPP_DIR = '/var/config/rest/iapps/f5-appsvcs';
const RETRY_OPTIONS = {
retries: 5,
delay: 1000
};
const readFile = function (path) {
return new Promise((resolve, reject) => {
fs.readFile(path, (error, data) => {
if (error) reject(error);
else resolve(data);
});
});
};
const getIControlPromise = (context, iControlOptions, failureMessage, options) => {
const promiseOptions = Object.assign({}, { checkStatus: true }, options);
return util.iControlRequest(context, iControlOptions)
.then((response) => {
if (promiseOptions.checkStatus && response.statusCode !== 200 && response.statusCode !== 202) |
return response;
});
};
const install = function (context) {
log.info('Installing service discovery worker');
return Promise.resolve()
.then(() => getDiscoveryRpm(context, 'packageName'))
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName))
.then(() => installDiscoveryRpm(context))
.then(() => waitForDiscoveryInit(context));
};
function getDiscoveryRpm(context, property) {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Get query',
crude: true,
send: JSON.stringify({ operation: 'QUERY' })
};
const args = [context, options, 'Failed to get discovery RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${JSON.parse(response.body).id}`,
method: 'GET',
ctype: 'application/json',
why: 'Get from response id',
crude: true
};
return getRPMInfo(context, opts, 1)
.then((info) => (property ? (info || {})[property] : info));
});
}
function getRPMInfo(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.getRPMName: Aborting after max retry attempts');
return undefined;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => getRPMInfo(context, options, attempts + 1));
}
let discoveryRpm;
body.queryResponse.forEach((pack) => {
if (pack.name === 'f5-service-discovery') {
discoveryRpm = pack;
}
});
return discoveryRpm;
})
.catch(() => getRPMInfo(context, options, attempts + 1));
}
function checkUninstallTask(context, options, attempts) {
if (attempts >= 5) {
log.debug('cloudLibUtils.checkUninstallTask: Aborting after max retry attempts');
return false;
}
return util.iControlRequest(context, options)
.then((res) => {
const body = JSON.parse(res.body);
if (body.status !== 'FINISHED') {
return promiseUtil.delay(200)
.then(() => checkUninstallTask(context, options, attempts + 1));
}
return true;
})
.catch(() => checkUninstallTask(context, options, attempts + 1));
}
function uninstallDiscoveryRpm(context, discoveryRpm) {
// not installed
if (typeof discoveryRpm === 'undefined') {
return Promise.resolve(true);
}
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Uninstall discovery worker',
send: JSON.stringify({ operation: 'UNINSTALL', packageName: discoveryRpm }),
crude: true
};
log.debug('Uninstalling service discovery worker');
const args = [context, options, 'Failed to uninstall RPM'];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args)
// give the request a moment to complete
.then((response) => promiseUtil.delay(200, response))
.then((response) => {
const uninstallTaskId = JSON.parse(response.body).id;
const opts = {
path: `/mgmt/shared/iapp/package-management-tasks/${uninstallTaskId}`,
method: 'GET',
ctype: 'application/json',
why: 'Get status of uninstall',
crude: true
};
return checkUninstallTask(context, opts, 1);
})
.then((uninstalled) => {
if (!uninstalled) {
log.debug('Warning: Uninstall may not have completely finished.');
}
return undefined;
})
.catch((e) => {
log.debug(`Error during discoveryWorker uninstall: ${e.message} at ${e.stackTrace}`);
return undefined;
});
}
function copyDiscoveryRpm(context) {
const fileName = fs.readdirSync(SOURCE_PATH).find((name) => name.indexOf('f5-service-discovery') >= 0);
return new Promise((resolve, reject) => {
iappUtil.copyToHost(
context,
`${SOURCE_PATH}/${fileName}`,
(error) => {
if (error) reject(error);
else resolve(fileName);
}
);
});
}
function installDiscoveryRpm(context) {
// TODO add version checking
return promiseUtil.retryPromise(copyDiscoveryRpm, RETRY_OPTIONS, [context])
.then((fileName) => {
const options = {
path: '/mgmt/shared/iapp/package-management-tasks',
method: 'POST',
ctype: 'application/json',
why: 'Install discovery worker',
crude: true,
send: JSON.stringify({
operation: 'INSTALL',
packageFilePath: `/var/config/rest/downloads/${fileName}`
})
};
log.debug('Installing discovery worker');
// There is no status code returned for this request
const args = [context, options, 'Failed to install discovery RPM', { checkStatus: false }];
return promiseUtil.retryPromise(getIControlPromise, RETRY_OPTIONS, args);
});
}
function waitForDiscoveryInit(context) {
const options = {
path: '/mgmt/shared/service-discovery/info',
method: 'GET',
why: 'Get discovery worker info',
crude: true
};
const args = [context, options, 'Failed waiting for discovery to start'];
return promiseUtil.retryPromise(getIControlPromise, { retries: 60, delay: 1000 }, args);
}
function checkVersions(desiredVersions, foundVersions) {
if (desiredVersions.length !== foundVersions.length) {
let message = `Length of desired versions (${desiredVersions.length}) `;
message += `does not equal length of found versions (${foundVersions.length})`;
throw new Error(message);
}
return desiredVersions.every((desired, i) => semver.eq(desired, foundVersions[i]));
}
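// Illustrative sketch (not part of the original module): checkVersions pairs the
// two arrays index by index and requires an exact semver match at every position,
// so ordering matters. The version strings below are made-up examples.
//
//   checkVersions(['1.7.0-1'], ['1.7.0-1']);          // true
//   checkVersions(['1.7.0-1'], ['1.6.0-2']);          // false
//   checkVersions(['1.7.0-1'], ['1.7.0-1', '2.0.0']); // throws: length mismatch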
function getDesiredVersions() {
return readFile(`${IAPP_DIR}/lib/versions.json`)
.then((data) => JSON.parse(data));
}
function findCloudLibVersions(context) {
const versions = {};
const requests = [];
requests.push(
getDiscoveryRpm(context)
.then((rpmInfo) => {
versions.discoveryWorker = rpmInfo ? `${rpmInfo.version}-${rpmInfo.release}` : '0.0.0';
})
);
return Promise.all(requests).then(() => versions);
}
function getFoundVersions(context) {
return Promise.resolve({})
.then((versions) => findCloudLibVersions(context).then((results) => Object.assign(versions, results)));
}
function needCloudLibsInstall(context, fromStartup) {
if (context.host.deviceType === DEVICE_TYPES.BIG_IQ) {
return false;
}
    // We can't install SD when running in a container on startup (there is no target),
    // but we still need to install it when the check happens during a request
if (fromStartup && context.host.deviceType === DEVICE_TYPES.CONTAINER) {
return false;
}
return true;
}
function getIsInstalled(context) {
if (!needCloudLibsInstall(context)) {
return Promise.resolve(true);
}
function toArray(versions) {
return [
versions.discoveryWorker
];
}
let desiredVersions = [];
let foundVersions = [];
log.debug('Checking cloud-libs versions');
return getDesiredVersions()
.then((o) => {
log.debug(`Desired versions: ${JSON.stringify(o)}`);
desiredVersions = toArray(o);
})
.then(() => getFoundVersions(context))
.then((o) => {
log.debug(`Discovered versions: ${JSON.stringify(o)}`);
foundVersions = toArray(o);
})
.then(() => checkVersions(desiredVersions, foundVersions))
.then((result) => {
log.debug(`Versions match: ${result}`);
return result;
});
}
function ensureInstall(context) {
return getIsInstalled(context)
.then((isInstalled) => (isInstalled ? Promise.resolve() : install(context)));
}
function ensureUninstall(context) {
return getDiscoveryRpm(context, 'packageName')
.then((discoveryRpmName) => uninstallDiscoveryRpm(context, discoveryRpmName));
}
function cleanupStoredDecl(context) {
if (context.target.deviceType !== DEVICE_TYPES.BIG_IP || context.host.buildType !== BUILD_TYPES.CLOUD) {
const message = 'cleanupStoredDecl can only be called when AS3 is running on a bigip!';
log.error(message);
throw new Error(message);
}
const cmd = `rm -f ${constants.encryptedDeclLocation} ${constants.encryptedDeclCounter}`;
return util.executeBashCommandExec(cmd)
.catch((error) => {
            log.error(`An error occurred while deleting stored declaration: ${error}`);
});
}
let IS_AVAILABLE;
function getIsAvailable() {
if (typeof IS_AVAILABLE !== 'undefined') {
return Promise.resolve(IS_AVAILABLE);
}
return new Promise((resolve) => {
fs.access(SOURCE_PATH, fs.R_OK, (error) => {
if (error) {
log.debug(`cloud-lib directory ${SOURCE_PATH} not found/readable`);
resolve(false);
return;
}
resolve(true);
});
})
.then((isAvailable) => {
IS_AVAILABLE = isAvailable;
return isAvailable;
});
}
function decryptFromRemote(context, secret) {
const postOptions = {
path: '/mgmt/shared/service-discovery/encryption',
method: 'POST',
send: JSON.stringify({
action: 'decrypt',
data: secret
})
};
return util.iControlRequest(context, postOptions)
.then((response) => response.result)
.catch((e) => {
e.message = `Failed decrypting cloud credentials: ${e.message}`;
throw e;
});
}
module.exports = {
checkVersions,
getIsAvailable,
getIsInstalled,
install,
ensureInstall,
ensureUninstall,
cleanupStoredDecl,
needCloudLibsInstall,
getDiscoveryRpm,
uninstallDiscoveryRpm,
decryptFromRemote,
waitForDiscoveryInit
};
| {
throw new Error(`${failureMessage}: ${response.statusCode}`);
} | conditional_block |
ad_grabber_util.py | from time import sleep
from uuid import uuid1
from pprint import pprint
from shutil import copy2
from multiprocessing import Process, Queue, Pool, Manager
from ad_grabber_classes import *
from adregex import *
from pygraph.classes.digraph import digraph
import os
import json
import jsonpickle
import subprocess
import cPickle
import logging
LOG = logging.getLogger("logAdGrabber")
ADREGEX = AdRegEx()
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
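# Illustrative usage sketch (paths are hypothetical, not from the original code):
# check_duplicate relies on the exit status of `diff`, so any byte-level
# difference makes two downloads count as distinct ads.
#
#   check_duplicate('/tmp/ad_1.gif', '/tmp/ad_1_copy.gif')  # True if byte-identical
#   check_duplicate('/tmp/ad_1.gif', '/tmp/ad_2.gif')       # False otherwise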
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
  iii) Keep track of the test sites and how many times they have displayed this
ad
"""
# bin by dimensions
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
#LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
error_bugs.append(bug)
continue
if bug.is_ad(): # give zerofucks to non-ads
height = '999'
width = '999'
if bug_filetype == 'swf':
# choose from the swf media bin
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump', '-X',
bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump', '-Y',
bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError :
LOG.exception("swfdump error on file %s" % bug_filepath)
else:
# choose from the img media bin
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
# else:
# notads
return ads,error_bugs
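# Shape of the return value, as a sketch inferred from the code above:
#   ads        -> { WebBug : { test_site_url : display_count, ... }, ... }
#   error_bugs -> [ WebBug, ... ]   # bugs whose payload could not be fetched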
def export_uniq_ads(ads, out_folder, rel_folder):
"""
Takes all the uniq ads seen in this session and writes its metadata
information to a csv file
"""
try :
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
#TODO check bug_type in ffext
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\
\n'.format(file_name, bug.get_name(), bug.get_filetype(),
'' ,test_site, refresh_num, train_category, 'N/A', bugcount,
bug.get_src()))
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or \
file_type.startswith('ASCII') or \
file_type.startswith('UTF-8 Unicode English') or \
file_type.startswith('very short') :
bug_type = 'text'
elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
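# Illustrative mapping from `file -b` output to (bug_type, is_ad); the strings
# below are hypothetical examples of what `file` might print:
#   'GIF image data, version 89a, 300 x 250' -> ('gif', True)
#   'GIF image data, version 89a, 1 x 1'     -> ('gif', False)   # tracking pixel
#   'Macromedia Flash data, version 10'      -> ('swf', True)
#   'HTML document, ASCII text'              -> ('text', False)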
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
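# Expected shape of `entry`, reconstructed from the keys accessed above
# (field values are hypothetical):
#   {
#     'bug': {'name': 'Some Ad Network', 'pattern': '...', 'type': 'ad',
#             'affiliation': 'Some Company'},   # 'affiliation' may be absent
#     'ent': {'policyContentLocation': 'http://ads.example.com/pixel.gif',
#             'pathname': '/pixel.gif'}
#   }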
def curl_worker_legacy(args):
|
def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
  aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
# uses a pool of 'curl' workers
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\
dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, \
((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
try:
sleep(0.5)
      curl_worker_pool.close()
      curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == "STOP":
LOG.debug('curl_worker %d received stop' % worker_id)
break
except Exception:
LOG.error('Error:')
#LOG.debug(task)
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
# subprocess.call(['curl', '-o', path , bug.get_src()])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
    except Exception as e:
      LOG.debug('Exception captured %s\n\n' % e)
      subpr_out = ''  # fall back to empty output so get_bug_type treats it as plain text
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
    parses the raw cookie string of a req/resp pair and extracts the individual cookies
returns a list of cookies set on this domain
"""
cookie_list = []
# parses cookie str if a cookie has been set
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
# usually this is just a flag e.g HTTPOnly, HTTPSOnly
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
      isBug, bug_name, bug_type = ADREGEX.search(d)
      domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
#jsonData contains all the domains and all the req/resp pairs made to them
#iterating over the domains first
for domain, dval in jsonData.items():
# but first check if a node for this domain has been created or not
domain_node = _check_node(domain)
cookie_list = []
# iterating thru all the req/resp pairs on a domain
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath' : domainPath,
'referrer' : referrer,
'referrerPath' : referrerPath,
'cookieList' : parsed_cookie
})
# making sure that we also create the node for the referrer
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
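# Sketch of the resulting structure, inferred from the code above: `nodes` maps
# each domain string to its WebNode; add_child/add_parent record the referrer
# edges, so walking children from a page's own domain yields the chains of
# redirections that eventually led to known ad/tracker domains.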
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns data structure containing request/resp info
Returns None if did not receive results from FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
  aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {} # keep track of worker processes
input_queue = Queue() # asynchronously feed workers task to do
worker_output_queue = Queue() # output queue from workers
ack_queue = Queue()
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
# Directory is created, Okay to pass
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,\
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
  # uses a pool of 'curl' workers
# curl_worker_pool = Pool(processes=8)
# manager = Manager()
# curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
# send stop signal
input_queue.put(("STOP",))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
# receive results from the worker
cbug = worker_output_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
| output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
# subprocess.call(['curl', '-o', path , bug.get_src() ])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
# Use the unix tool 'file' to check filetype
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug) | identifier_body |
ad_grabber_util.py | from time import sleep
from uuid import uuid1
from pprint import pprint
from shutil import copy2
from multiprocessing import Process, Queue, Pool, Manager
from ad_grabber_classes import *
from adregex import *
from pygraph.classes.digraph import digraph
import os
import json
import jsonpickle
import subprocess
import cPickle
import logging
LOG = logging.getLogger("logAdGrabber")
ADREGEX = AdRegEx()
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
  iii) Keep track of the test sites and how many times they have displayed this
ad
"""
# bin by dimensions
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
#LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
error_bugs.append(bug)
continue
if bug.is_ad(): # give zerofucks to non-ads
height = '999'
width = '999'
if bug_filetype == 'swf':
# choose from the swf media bin
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump', '-X',
bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump', '-Y',
bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError :
LOG.exception("swfdump error on file %s" % bug_filepath)
else:
# choose from the img media bin
|
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
# else:
# notads
return ads,error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
Takes all the uniq ads seen in this session and writes its metadata
information to a csv file
"""
try :
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
#TODO check bug_type in ffext
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\
\n'.format(file_name, bug.get_name(), bug.get_filetype(),
'' ,test_site, refresh_num, train_category, 'N/A', bugcount,
bug.get_src()))
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or \
file_type.startswith('ASCII') or \
file_type.startswith('UTF-8 Unicode English') or \
file_type.startswith('very short') :
bug_type = 'text'
elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
# subprocess.call(['curl', '-o', path , bug.get_src() ])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
# Use the unix tool 'file' to check filetype
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
  aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
# uses a pool of 'curl' workers
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\
dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, \
((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
try:
sleep(0.5)
      curl_worker_pool.close()
      curl_worker_pool.join()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == "STOP":
LOG.debug('curl_worker %d received stop' % worker_id)
break
except Exception:
LOG.error('Error:')
#LOG.debug(task)
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
# subprocess.call(['curl', '-o', path , bug.get_src()])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
    except Exception as e:
      LOG.debug('Exception captured %s\n\n' % e)
      subpr_out = ''  # fall back to empty output so get_bug_type treats it as plain text
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
    parses the raw cookie string of a req/resp pair and extracts the individual cookies
returns a list of cookies set on this domain
"""
cookie_list = []
# parses cookie str if a cookie has been set
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
# usually this is just a flag e.g HTTPOnly, HTTPSOnly
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
      isBug, bug_name, bug_type = ADREGEX.search(d)
      domain_node = WebNode(d, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
#jsonData contains all the domains and all the req/resp pairs made to them
#iterating over the domains first
for domain, dval in jsonData.items():
# but first check if a node for this domain has been created or not
domain_node = _check_node(domain)
cookie_list = []
# iterating thru all the req/resp pairs on a domain
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath' : domainPath,
'referrer' : referrer,
'referrerPath' : referrerPath,
'cookieList' : parsed_cookie
})
# making sure that we also create the node for the referrer
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns data structure containing request/resp info
Returns None if did not receive results from FF addon
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
  aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {} # keep track of worker processes
input_queue = Queue() # asynchronously feed workers task to do
worker_output_queue = Queue() # output queue from workers
ack_queue = Queue()
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
# Directory is created, Okay to pass
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,\
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
  # uses a pool of 'curl' workers
# curl_worker_pool = Pool(processes=8)
# manager = Manager()
# curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
# send stop signal
input_queue.put(("STOP",))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
# receive results from the worker
cbug = worker_output_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
| target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath) | conditional_block |
ad_grabber_util.py | from time import sleep
from uuid import uuid1
from pprint import pprint
from shutil import copy2
from multiprocessing import Process, Queue, Pool, Manager
from ad_grabber_classes import *
from adregex import *
from pygraph.classes.digraph import digraph
import os
import json
import jsonpickle
import subprocess
import cPickle
import logging
LOG = logging.getLogger("logAdGrabber")
ADREGEX = AdRegEx()
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
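# Hedged usage note (paths below are hypothetical): check_duplicate shells out
# to the unix `diff` tool, so two byte-identical downloads return True, while
# any difference -- or a missing file -- raises CalledProcessError and yields False.
# Example: check_duplicate('/tmp/ad_123.gif', '/tmp/ad_456.gif')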
def | (session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
iii) Keep track of the test sites and how many times they have displayed this
ad
"""
# bin by dimensions
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
#LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
error_bugs.append(bug)
continue
if bug.is_ad(): # give zerofucks to non-ads
height = '999'
width = '999'
if bug_filetype == 'swf':
# choose from the swf media bin
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump', '-X',
bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump', '-Y',
bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError :
LOG.exception("swfdump error on file %s" % bug_filepath)
else:
# choose from the img media bin
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
# else:
# notads
return ads,error_bugs
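# Illustrative sketch, not part of the original pipeline: the dedupe logic above
# boils down to "bin files by their height-width string, then run check_duplicate
# only within a bin". A minimal standalone version of that idea (inputs are
# hypothetical (filepath, height, width) tuples) could look like this:
def _example_bin_and_dedupe(files_with_dims):
    bins = {}
    uniq_files = []
    for filepath, height, width in files_with_dims:
        dimension = '%s-%s' % (height, width)
        duplicate = None
        for seen in bins.get(dimension, []):
            if check_duplicate(filepath, seen):
                duplicate = seen
                break
        if duplicate is None:
            bins.setdefault(dimension, []).append(filepath)
            uniq_files.append(filepath)
    return uniq_files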
def export_uniq_ads(ads, out_folder, rel_folder):
"""
Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
try :
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
#TODO check bug_type in ffext
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\
\n'.format(file_name, bug.get_name(), bug.get_filetype(),
'' ,test_site, refresh_num, train_category, 'N/A', bugcount,
bug.get_src()))
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or \
file_type.startswith('ASCII') or \
file_type.startswith('UTF-8 Unicode English') or \
file_type.startswith('very short') :
bug_type = 'text'
elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
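# Hedged self-check for get_bug_type: the `file -b` strings below are plausible
# examples chosen for illustration, not outputs captured from the original runs.
def _example_get_bug_type():
    assert get_bug_type('HTML document, ASCII text') == ('text', False)
    assert get_bug_type('GIF image data, version 89a, 1 x 1') == ('gif', False)
    assert get_bug_type('PNG image data, 300 x 250, 8-bit/color RGBA') == ('png', True)
    assert get_bug_type('Macromedia Flash data (compressed), version 10') == ('swf', True)
    assert get_bug_type('JPEG image data, JFIF standard 1.01') == ('jpg', True)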
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
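# For reference, a minimal entry that parse_buginfo accepts is sketched below.
# The field names mirror the lookups above; the values are made-up placeholders.
_EXAMPLE_BUG_ENTRY = {
    'bug': {'name': 'example network', 'pattern': 'ads/.*', 'type': 'script',
            'affiliation': 'example-affiliation'},
    'ent': {'policyContentLocation': 'http://ads.example.com/banner.js',
            'pathname': '/banner.js'},
}
# parse_buginfo(_EXAMPLE_BUG_ENTRY) returns a WebBug named 'examplenetwork'
# (spaces stripped), with src set to the policyContentLocation above.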
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
# subprocess.call(['curl', '-o', path , bug.get_src() ])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
# Use the unix tool 'file' to check filetype
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
# uses a pool of 'curl' workers
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\
dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, \
((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
try:
sleep(0.5)
curl_worker_pool.join()
curl_worker_pool.close()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == "STOP":
LOG.debug('curl_worker %d received stop' % worker_id)
break
except Exception:
LOG.error('Error:')
#LOG.debug(task)
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
# subprocess.call(['curl', '-o', path , bug.get_src()])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
except Exception as e :
LOG.debug('Exception captured %s\n\n' % e)
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
parses the cookie string of a req/resp call to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
# parses cookie str if a cookie has been set
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
# usually this is just a flag e.g HTTPOnly, HTTPSOnly
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
isBug, bug_name, bug_type = ADREGEX.search(domain)
domain_node = WebNode(domain, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
#jsonData contains all the domains and all the req/resp pairs made to them
#iterating over the domains first
for domain, dval in jsonData.items():
# but first check if a node for this domain has been created or not
domain_node = _check_node(domain)
cookie_list = []
# iterating thru all the req/resp pairs on a domain
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath' : domainPath,
'referrer' : referrer,
'referrerPath' : referrerPath,
'cookieList' : parsed_cookie
})
# making sure that we also create the node for the referrer
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
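# Hedged example of the input shape build_nodes expects, inferred from the field
# accesses above (domain -> list of req/resp dicts). The values are placeholders,
# not data from a real crawl:
_EXAMPLE_JSON_DATA = {
    'ads.example.com': [
        {'domainPath': 'http://ads.example.com/pixel.gif',
         'referrer': 'publisher.example.org',
         'referrerPath': 'http://publisher.example.org/article',
         'cookie': True,
         'cookiestr': 'uid=42; path=/; HttpOnly'},
    ],
}
# build_nodes(_EXAMPLE_JSON_DATA) would return WebNode objects for both
# 'ads.example.com' and 'publisher.example.org', linked as child and parent.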
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns a data structure containing the request/resp info.
Returns None if no results were received from the FF addon.
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {} # keep track of worker processes
input_queue = Queue() # asynchronously feed workers task to do
worker_output_queue = Queue() # output queue from workers
ack_queue = Queue()
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
# Directory already exists, okay to pass
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,\
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
# uses a pool of 'curl' workers
# curl_worker_pool = Pool(processes=8)
# manager = Manager()
# curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers):
# send stop signal
input_queue.put(("STOP",))
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
# receive results from the worker
cbug = worker_output_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return
| identify_uniq_ads | identifier_name |
ad_grabber_util.py | from time import sleep
from uuid import uuid1
from pprint import pprint
from shutil import copy2
from multiprocessing import Process, Queue, Pool, Manager
from ad_grabber_classes import *
from adregex import *
from pygraph.classes.digraph import digraph
import os
import json
import jsonpickle
import subprocess
import cPickle
import logging
LOG = logging.getLogger("logAdGrabber")
ADREGEX = AdRegEx()
def check_duplicate(fp1, fp2):
"""takes two files, does a diff on them, returns True if same"""
try:
subprocess.check_output(['diff', fp1, fp2])
return True
except subprocess.CalledProcessError:
return False
def identify_uniq_ads(session_results):
"""
i) Identify duplicate ads
ii) bin the ads by their dimensions
iii) Keep track of the test sites and how many times they have displayed this
ad
"""
# bin by dimensions
ads = {}
notads = {}
swf_bin = {}
img_bin = {}
error_bugs = []
for train_category, cat_dict in session_results.items():
for test_site, bug_dict_list in cat_dict.items():
for index_count in range(len(bug_dict_list)):
bug_dict = bug_dict_list[index_count]
for bug, bug_count in bug_dict.items():
bug_filetype = bug.get_filetype()
bug_filepath = bug.get_filepath()
if bug_filepath == '':
#LOG.debug('did not manage to curl the scripts for bug:%s' % bug)
error_bugs.append(bug)
continue
if bug.is_ad(): # give zerofucks to non-ads
height = '999'
width = '999'
if bug_filetype == 'swf':
# choose from the swf media bin
target_bin = swf_bin
try:
width = subprocess.check_output(['swfdump', '-X',
bug_filepath]).split(' ')[-1].strip()
height = subprocess.check_output(['swfdump', '-Y',
bug_filepath]).split(' ')[-1].strip()
except subprocess.CalledProcessError :
LOG.exception("swfdump error on file %s" % bug_filepath)
else:
# choose from the img media bin
target_bin = img_bin
LOG.debug(bug_filepath)
try:
height = subprocess.check_output(['identify', '-format', '"%h"',\
bug_filepath]).strip()
width = subprocess.check_output(['identify', '-format','"%w"',\
bug_filepath]).strip()
except subprocess.CalledProcessError:
LOG.exception("identify error on file %s" % bug_filepath)
try:
bug.set_dimension(height, width)
dimension = '%s-%s' % (height, width)
# check all the images in the bin with the dimensions
m_list = target_bin[dimension]
dup = None
for m in m_list:
if check_duplicate(bug_filepath, m.get_filepath()):
dup = m
break
if dup:
# check if the duplicate ad came from a different test site
if test_site in ads[dup]:
ads[dup][test_site] += bug_count
else :
ads[dup] = {test_site : bug_count}
# delete old bug reference, add new one and point to duplicated
# bug
del bug_dict[bug]
bug_dict[dup] = bug_count
else:
target_bin[dimension].append(bug)
ads[bug] = {test_site : bug_count}
# tally up the results
except KeyError: # The bin hasn't been created
target_bin[dimension] = [bug]
ads[bug] = {test_site : bug_count}
# else:
# notads
return ads,error_bugs
def export_uniq_ads(ads, out_folder, rel_folder):
"""
Takes all the uniq ads seen in this session and writes their metadata
information to a csv file
"""
try :
os.makedirs(out_folder)
os.makedirs(os.path.join(out_folder, rel_folder))
except OSError:
LOG.debug('Creating output folder')
fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')
# Relative location = Location of the ad within this current session
# Global location, added when an ad is matched with existing ads in DB
fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\n')
for bug in ads.keys():
height, width = bug.get_dimension()
filepath = bug.get_filepath()
name = bug.get_name()
src = bug.get_src()
filetype = bug.get_filetype()
new_uuidname = '%s.%s' % (uuid1(), filetype)
bug.set_uuid(new_uuidname)
new_filepath = os.path.join(out_folder, new_uuidname)
rel_filepath = os.path.join(rel_folder, new_uuidname)
copy2(filepath, new_filepath)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(new_uuidname,
name, filetype, height, width, rel_filepath, src))
fwtr.close()
return ads
def write_run_info(RUNINFO_DIR, session_date):
# write to a file in runinfo_dir to tell automation script this run is done
fp = os.path.join(RUNINFO_DIR, '%s.info' % session_date)
with open(fp, 'w') as fwtr:
fwtr.write('OK')
def write_session_info(vmid, machineid, profile, session_date, train_mode, training_sites,
test_sites, num_of_refresh, export_folder):
train_category = training_sites.keys()[0]
train_sites_to_visit = training_sites[train_category]
with open(os.path.join(export_folder, 'session_info.csv'), 'w') as fwtr:
fwtr.write('session_str : %s\n' % session_date)
fwtr.write('machine_info : %s\n' % machineid)
fwtr.write('vmid : %s\n' % vmid)
fwtr.write('profile : %s\n' % profile)
fwtr.write('train_mode : %s\n' % train_mode)
fwtr.write('num_of_refresh : %d\n' % num_of_refresh)
fwtr.write('training_topic : %s\n' % train_category)
fwtr.write('training_sites : ')
for site in train_sites_to_visit:
fwtr.write('%s, ' % site)
fwtr.write('\nnum_of_train_sites : %d\n' % len(train_sites_to_visit))
fwtr.write('test_sites : ')
for site in test_sites:
fwtr.write('%s, ' % site[1])
fwtr.write('\nnum_of_test_sites : %d\n' % len(test_sites))
def generate_stats(results, ads, vmid, session_date, export_folder, process_ex_time):
"""
Generates stats on
- uniq ads seen on the test sites
- total number of ads seen on the test sites
- total number of ads seen on all test sites
- total number of uniq ads seen on all test sites
"""
try:
os.makedirs(export_folder)
except OSError:
pass
# to be read and inserted into db
totalads = 0 # total number of ads seen during this session
totaluniqads = len(ads) # does not support multicategories at this point
# for each category, for each test site, count total number of ads seen
totalad_category = {}
# for each category, for each test site, count total number of uniq ads seen
uniqad_category = {}
with open(os.path.join(export_folder, 'session_bugs.csv'), 'w') as bugs_wtr:
bugs_wtr.write('#Ad-UID, Website-URL, Refresh-Num, Training-Topic,\
Site-Context, BugCount, BugSrc\n')
for train_category, cat_dict in results.items():
totalad_category[train_category] = {}
uniqad_category[train_category] = {}
for test_site, bug_dict_list in cat_dict.items():
total_ads = 0 # for each site
uniq_ads = [] # for each site
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if bug.is_ad():
uuid = bug.get_uuid()
bugs_wtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(uuid, test_site,
refresh_num, train_category, 'N/A', bugcount, bug.get_src()))
total_ads += bugcount
if bug not in uniq_ads:
uniq_ads.append(bug)
totalad_category[train_category][test_site] = total_ads
uniqad_category[train_category][test_site] = len(uniq_ads)
totalads += total_ads # global count for total ads
with open(os.path.join(export_folder, 'session_stats.csv'), 'w') as ses_wtr:
# write some metadata information about this session
ses_wtr.write('#VMID: %s\n' % vmid)
ses_wtr.write('#Session-Date: %s\n' % session_date)
ses_wtr.write('#Time to complete: %s\n' % process_ex_time)
ses_wtr.write('#Training Categories: %s\n' % str(results.keys()))
ses_wtr.write('#Total Number of ads: %d\n' % totalads)
ses_wtr.write('#Total Uniq ads: %d\n\n' % totaluniqads)
ses_wtr.write('#TrainingTopic, Test-Site, NumberOfVisit, TotalAds, UniqAds\n')
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
num_of_visit = len(bug_dict_list)
ses_wtr.write('{0}, {1}, {2}, {3}, {4}\n'.format(train_category,
test_site, num_of_visit, totalad_category[train_category][test_site],
uniqad_category[train_category][test_site]))
def export_ads(results,out_folder):
"""
This function creates a csv file which contains all the unique ads seen in
each test site (including all the refreshes)
TODO update the doc
results is a dictionary of the following
results = { Category : Value, ... }
value = { test_site_url : [ result1, result2, ... resultN], ... }
resultN : { WebBug : count, ... }
"""
try:
os.makedirs(out_folder)
except OSError:
LOG.debug('Creating output file folder ...')
export_ad_counter = 1 # assign unique number to ads for export to mturk
#short_listed_companies = ['google adsense', 'doubleclick']
with open(os.path.join(out_folder,'ad_labelling.csv'), 'w') as fwtr:
# write the titles
fwtr.write('#{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\n'.format(\
'Ad#', 'Company', 'FileType', 'Ad-Category', 'Website-URL',\
'Refresh-Num','Training-Topic', 'Context-of-site', 'Total', 'Ad-src'))
# make sure we only add one ad
for train_category, cat_dict in results.items():
for test_site, bug_dict_list in cat_dict.items():
for refresh_num in range(len(bug_dict_list)):
bug_dict = bug_dict_list[refresh_num]
for bug, bugcount in bug_dict.items():
if not bug.is_ad():
#TODO check bug_type in ffext
continue
if bug.get_filetype() in ['swf', 'png', 'gif', 'jpg']:
file_name = '%d.%s' % (export_ad_counter, bug.get_filetype())
new_location = os.path.join(out_folder, file_name)
copy2(bug.get_filepath(), new_location)
fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7} , {8}, {9},\
\n'.format(file_name, bug.get_name(), bug.get_filetype(),
'' ,test_site, refresh_num, train_category, 'N/A', bugcount,
bug.get_src()))
export_ad_counter += 1
def get_bug_type(file_type):
is_ad = False
bug_type = 'text'
if file_type.startswith('HTML') or \
file_type.startswith('ASCII') or \
file_type.startswith('UTF-8 Unicode English') or \
file_type.startswith('very short') :
bug_type = 'text'
elif (file_type.endswith('1 x 1') and file_type.startswith('GIF')):
bug_type = 'gif'
elif file_type.startswith('PNG'):
bug_type = 'png'
is_ad = True
elif file_type.startswith('GIF'):
bug_type = 'gif'
is_ad = True
elif file_type.startswith('Macromedia Flash'):
bug_type = 'swf'
is_ad = True
elif file_type.startswith('JPEG'):
bug_type = 'jpg'
is_ad = True
return bug_type, is_ad
def parse_buginfo(entry):
"""
Takes the json decoded bug information and inserts it into a WebBug instance
"""
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
def curl_worker_legacy(args):
output_dir = args[0]
saved_file_name = args[1]
path = args[2]
bug = args[3]
curl_result_queue = args[4]
# subprocess.call(['curl', '-o', path , bug.get_src() ])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
# Use the unix tool 'file' to check filetype
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
curl_result_queue.put(bug)
def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
pass
# uses a pool of 'curl' workers
curl_worker_pool = Pool(processes=num_of_workers)
manager = Manager()
curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('Timing out on get from queue...')
break
for entry in found_bugs:
bugname = entry['bug']['name'].replace(' ','').replace('/','_')
bugsrc = entry['ent']['policyContentLocation']
bugpattern = entry['bug']['pattern']
try :
bugaffiliation = entry['bug']['affiliation']
except KeyError:
bugaffiliation = ""
bugtype = entry['bug']['type']
bugpathname = entry['ent']['pathname']
bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,
bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\
dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
obj = curl_worker_pool.apply_async(curl_worker_legacy, \
((output_dir, saved_location, save_to_path, bug, curl_result_queue),))
try:
sleep(0.5)
curl_worker_pool.join()
curl_worker_pool.close()
curl_worker_pool.terminate()
except Exception:
LOG.debug('Closing pool')
while not curl_result_queue.empty():
cbug = curl_result_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
def curl_worker(output_dir, input_queue, worker_output_queue, worker_id,\
ack_queue):
while True:
try:
task = input_queue.get()
if len(task) == 1 and task[0] == "STOP":
LOG.debug('curl_worker %d received stop' % worker_id)
break
except Exception:
LOG.error('Error:')
#LOG.debug(task)
saved_file_name = task[0]
path = task[1]
bug = task[2]
try:
# subprocess.call(['curl', '-o', path , bug.get_src()])
subprocess.call(['wget', '-t', '1', '-q', '-T', '3', '-O', path , bug.get_src()])
subpr_out = subprocess.check_output(['file', '-b', path]).strip()
except Exception as e :
LOG.debug('Exception captured %s\n\n' % e)
filetype, is_ad = get_bug_type(subpr_out)
if is_ad:
new_path = os.path.join(output_dir, '%s.%s' % (saved_file_name, filetype))
else:
new_path = os.path.join(output_dir, 'notad', '%s.%s' % (saved_file_name,\
filetype))
os.rename(path, new_path)
bug.set_is_ad(is_ad)
bug.set_filetype(filetype)
bug.set_filepath(new_path)
worker_output_queue.put(bug)
ack_queue.put(worker_id)
return
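# Sketch (not part of the original module): the worker protocol used by
# curl_worker above and process_results below, in isolation -- the parent
# enqueues tasks, sends one ("STOP",) sentinel per worker, then collects one
# ack per worker before joining the processes.
def _toy_worker(task_queue, ack_queue, worker_id):
    while True:
        task = task_queue.get()
        if len(task) == 1 and task[0] == "STOP":
            break
        # real work (wget + file-type detection) would happen here
    ack_queue.put(worker_id)

def _example_worker_protocol(num_of_workers=2):
    tasks, acks = Queue(), Queue()
    workers = [Process(target=_toy_worker, args=(tasks, acks, i))
               for i in range(num_of_workers)]
    for p in workers:
        p.start()
    for _ in workers:
        tasks.put(("STOP",))
    for _ in workers:
        acks.get()
    for p in workers:
        p.join()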
def build_nodes(jsonData):
"""
This function takes a JSON encoded output of the firefox addon and builds a
call graph for the javascript/HTML redirections
@rtype nodes: dict
@return: A graph of redirection chains
"""
nodes = {}
def _process_cookiestr(cookieStr):
"""
parses the cookie string of a req/resp call to extract the cookie information
returns a list of cookies set on this domain
"""
cookie_list = []
# parses cookie str if a cookie has been set
for cookie in cookieStr.split('\n'):
c = {}
for cook in cookie.split(';'):
token = cook.split('=', 1)
if len(token) < 2:
# usually this is just a flag e.g HTTPOnly, HTTPSOnly
continue
c[token[0]] = token[1]
cookie_list.append(c)
return cookie_list
def _check_node(d):
try:
domain_node = nodes[d]
except KeyError:
isBug, bug_name, bug_type = ADREGEX.search(domain)
domain_node = WebNode(domain, isBug, bug_name, bug_type)
nodes[d] = domain_node
return domain_node
#jsonData contains all the domains and all the req/resp pairs made to them
#iterating over the domains first
for domain, dval in jsonData.items():
# but first check if a node for this domain has been created or not
domain_node = _check_node(domain)
cookie_list = []
# iterating thru all the req/resp pairs on a domain
for info in dval:
domainPath = info['domainPath']
referrerPath = info['referrerPath']
referrer = info['referrer']
cookieBool = info['cookie']
parsed_cookie = None
if cookieBool:
cookieStr = info['cookiestr']
parsed_cookie = _process_cookiestr(cookieStr)
cookie_list.append(parsed_cookie)
domain_node.add_reqresp({'domainPath' : domainPath,
'referrer' : referrer,
'referrerPath' : referrerPath,
'cookieList' : parsed_cookie
})
# making sure that we also create the node for the referrer
referrer_node = _check_node(referrer)
referrer_node.add_child(domain_node)
domain_node.add_parent(referrer_node)
domain_node.set_cookies(cookie_list)
return nodes
def filter_results(extQueue, timeout_value, url):
"""
This function takes the JSON output of the firefox addon, and matches the
request URL against a list of known tracker/ads regexes.
Returns a data structure containing the request/resp info.
Returns None if no results were received from the FF addon.
"""
from Queue import Empty
try:
LOG.debug('Timeout value in filter_result :%d' % timeout_value)
nodes = extQueue.get(True, timeout=timeout_value)
except Empty as e:
LOG.info('Did not receive any results from FF plugin for %s' % url)
nodes = None
finally:
while not extQueue.empty():
extQueue.get()
return nodes
def process_results(refresh_count, output_dir, ext_queue, result_queue,
num_of_workers=8):
"""
This function goes through all the bugs identified by the firefox plugin and
aggregates each bug's occurrence on a given page. The aggregation is necessary
for duplicate ads on the same page
"""
workers_dict = {} # keep track of worker processes
input_queue = Queue() # asynchronously feed workers task to do
worker_output_queue = Queue() # output queue from workers
ack_queue = Queue()
bug_dict = {} # dict to keep track of how many duplicates of each bug, if
# exists
try:
# separate the non-ads from the ads for ease of handchecking
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, 'notad'))
except OSError:
# Directory already exists, okay to pass
pass
for i in range(num_of_workers):
p = Process(target=curl_worker, args=(output_dir, input_queue,\
worker_output_queue, i, ack_queue))
p.start()
workers_dict[i] = p
# uses a pool of 'curl' workers
# curl_worker_pool = Pool(processes=8)
# manager = Manager()
# curl_result_queue = manager.Queue()
dl_counter = 0 # keep track of how many bugs downloaded
while True:
try:
found_bugs = json.loads(ext_queue.get(block=True, timeout=2))
except Exception:
LOG.debug('No more bugs found, break out of queue')
break
for entry in found_bugs:
bug = parse_buginfo(entry)
try:
# matched an entry in the bugdict, incr count and continue
bug_dict[bug] += 1
continue
except KeyError:
bug_dict[bug] = 1
try:
saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)
dl_counter += 1
save_to_path = os.path.join( output_dir, '%s' % saved_location)
input_queue.put((saved_location, save_to_path, bug))
except Exception as e:
LOG.exception('%s' % e)
for i in range(num_of_workers): |
stopped = 0
while stopped < len(workers_dict):
ack = ack_queue.get()
p = workers_dict[ack]
p.join(timeout=1)
if p.is_alive():
p.terminate()
LOG.debug('terminating process %d' % ack)
stopped += 1
while not worker_output_queue.empty():
# receive results from the worker
cbug = worker_output_queue.get()
# ugly code here
bugcount = bug_dict[cbug]
del bug_dict[cbug]
bug_dict[cbug] = bugcount
with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:
cPickle.dump(bug_dict, fwtr)
result_queue.put(bug_dict)
return | # send stop signal
input_queue.put(("STOP",)) | random_line_split |
model.go | package smallben
import (
"bytes"
"encoding/base64"
"encoding/gob"
"encoding/json"
"errors"
"github.com/robfig/cron/v3"
"time"
)
// Job is the struct used to interact with SmallBen.
type Job struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64
// cronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
cronID int64
// CronExpression specifies the scheduling of the job.
CronExpression string
// paused specifies whether this job has been paused.
// Only used when returning this struct.
paused bool
// createdAt specifies when this rawJob has been created.
createdAt time.Time
// updatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
updatedAt time.Time
// Job is the real unit of work to be executed
Job CronJob
// JobInput is the additional input to pass to the inner Job.
JobInput map[string]interface{}
}
// CreatedAt returns the time when this Job has been added to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
schedule: schedule,
run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
// RawJob models a raw rawJob coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the JSON encoding
// of the map containing the arguments for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decodes j.SerializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs that will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
}
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns errors in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to interface.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
// This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput encodes j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
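// Illustrative sketch, not part of the original API: BuildJob and encodeJobInput
// above use gob (then base64) for the CronJob value and JSON for the input map.
// The concrete type below is made up for the example; a real implementation must
// also be registered with gob so the value can travel through the CronJob interface.
type exampleJob struct{}

func (exampleJob) Run(input CronJobInput) {}

func exampleEncodeJob() (string, error) {
	gob.Register(exampleJob{})
	var job CronJob = exampleJob{}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&job); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}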
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobsWithScheduleList(jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFromJobList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobList(jobs []Job) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.ID
}
return ids
}
// UpdateOption is the struct used to update
// a Job.
//
// An update on a Job consists of changing
// the schedule, by using the field `CronExpression`,
// or the Job input, by using the field `JobOtherInputs`.
//
// If none of those fields are specified, i.e., they are
// both nil, the struct is considered invalid.
type UpdateOption struct {
// JobID is the ID of the Job to update.
JobID int64
// CronExpression is the new schedule of the Job.
// If nil, it is ignored, i.e., the schedule
// is not changed.
CronExpression *string
// JobOtherInputs is the new OtherInputs of the Job.
// If nil, it is ignored, i.e.,
// the Job input is not changed.
JobOtherInputs *map[string]interface{}
}
func (u *UpdateOption) schedule() (cron.Schedule, error) {
return cron.ParseStandard(*u.CronExpression)
}
var (
// ErrUpdateOptionInvalid is returned when the fields
// of UpdateOption are invalid.
// This error is returned when the combination
// of the fields is not valid (i.e., both nil).
// For errors in the CronExpression field,
// the specific error set by the library is returned.
ErrUpdateOptionInvalid = errors.New("invalid option")
)
// Valid returns whether the fields in this struct
// are valid. If the struct is valid, no errors
// are returned.
//
// UpdateOption is considered valid if
// at least one field between CronExpression and JobOtherInputs
// is not nil, and the cron string can be parsed.
func (u *UpdateOption) Valid() error {
if u.CronExpression == nil && u.JobOtherInputs == nil {
return ErrUpdateOptionInvalid
}
if u.CronExpression != nil {
if _, err := u.schedule(); err != nil |
}
return nil
}
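// Hedged usage sketch (the job ID and schedule are made-up values): to change
// only the schedule of an existing job, set CronExpression and leave
// JobOtherInputs nil; Valid returns nil as long as the cron expression parses.
func exampleUpdateOption() error {
	every5 := "*/5 * * * *"
	opt := UpdateOption{
		JobID:          1,
		CronExpression: &every5,
	}
	return opt.Valid()
}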
// getIdsFromUpdateScheduleList basically does schedules.map(rawJob -> rawJob.id)
func getIdsFromUpdateScheduleList(schedules []UpdateOption) []int64 {
ids := make([]int64, len(schedules))
for i, test := range schedules {
ids[i] = test.JobID
}
return ids
}
// CronJobInput is the input passed to the Run function.
type CronJobInput struct {
// JobID is the ID of the current job.
JobID int64
// GroupID is the GroupID of the current job.
GroupID int64
// SuperGroupID is the SuperGroupID of the current job.
SuperGroupID int64
// CronExpression is the interval of execution, as specified on job creation.
CronExpression string
// OtherInputs contains the other inputs of the job.
OtherInputs map[string]interface{}
}
// CronJob is the interface jobs have to implement.
// It contains only one single method, `Run`.
type CronJob interface {
Run(input CronJobInput)
}
| {
return err
} | conditional_block |
model.go | package smallben
import (
"bytes"
"encoding/base64"
"encoding/gob"
"encoding/json"
"errors"
"github.com/robfig/cron/v3"
"time"
)
// Job is the struct used to interact with SmallBen.
type Job struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64
// cronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
cronID int64
// CronExpression specifies the scheduling of the job.
CronExpression string
// paused specifies whether this job has been paused.
// Only used when returning this struct.
paused bool
// createdAt specifies when this rawJob has been created.
createdAt time.Time
// updatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
updatedAt time.Time
// Job is the real unit of work to be executed
Job CronJob
// JobInput is the additional input to pass to the inner Job.
JobInput map[string]interface{}
}
// CreatedAt returns the time when this Job has been added to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
schedule: schedule,
run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
// RawJob models a raw rawJob coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// where this group is contained in.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron rawJob as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the JSON encoding
// of the map containing the arguments for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decodes j.SerializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs that will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
}
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns errors in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to interface.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
// This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput encodes j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func | (jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFromJobList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobList(jobs []Job) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.ID
}
return ids
}
// UpdateOption is the struct used to update
// a Job.
//
// An update on a Job consists of changing
// the schedule, by using the field `CronExpression`,
// or the Job input, by using the field `JobOtherInputs`.
//
// If none of those fields are specified, i.e., they are
// both nil, the struct is considered invalid.
type UpdateOption struct {
// JobID is the ID of the Job to update.
JobID int64
// CronExpression is the new schedule of the Job.
// If nil, it is ignored, i.e., the schedule
// is not changed.
CronExpression *string
// JobOtherInputs is the new OtherInputs of the Job.
// If nil, it is ignored, i.e.,
// the Job input is not changed.
JobOtherInputs *map[string]interface{}
}
func (u *UpdateOption) schedule() (cron.Schedule, error) {
return cron.ParseStandard(*u.CronExpression)
}
var (
// ErrUpdateOptionInvalid is returned when the fields
// of UpdateOption are invalid.
// This error is returned when the combination
// of the fields is not valid (i.e., both nil).
// For errors in the CronExpression field,
// the specific error set by the cron library is returned.
ErrUpdateOptionInvalid = errors.New("invalid option")
)
// Valid returns whether the fields in this struct
// are valid. If the struct is valid, no error
// is returned.
//
// UpdateOption is considered valid if
// at least one field between CronExpression and JobOtherInputs
// is not nil, and the cron string can be parsed.
func (u *UpdateOption) Valid() error {
if u.CronExpression == nil && u.JobOtherInputs == nil {
return ErrUpdateOptionInvalid
}
if u.CronExpression != nil {
if _, err := u.schedule(); err != nil {
return err
}
}
return nil
}
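// Illustrative sketch, not part of the original file: building and validating
// an UpdateOption that only changes the schedule. The job ID and the cron
// spec are made-up values; the spec format is whatever cron.ParseStandard
// accepts (standard five-field expressions or descriptors such as "@hourly").
func exampleUpdateOption() error {
    newSpec := "*/5 * * * *" // every five minutes
    opt := UpdateOption{
        JobID:          1,
        CronExpression: &newSpec,
        // JobOtherInputs is left nil, so the job input is not touched.
    }
    return opt.Valid()
}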
// getIdsFromUpdateScheduleList basically does schedules.map(updateOption -> updateOption.jobID)
func getIdsFromUpdateScheduleList(schedules []UpdateOption) []int64 {
ids := make([]int64, len(schedules))
for i, test := range schedules {
ids[i] = test.JobID
}
return ids
}
// CronJobInput is the input passed to the Run function.
type CronJobInput struct {
// JobID is the ID of the current job.
JobID int64
// GroupID is the GroupID of the current job.
GroupID int64
// SuperGroupID is the SuperGroupID of the current job.
SuperGroupID int64
// CronExpression is the interval of execution, as specified on job creation.
CronExpression string
// OtherInputs contains the other inputs of the job.
OtherInputs map[string]interface{}
}
// CronJob is the interface jobs have to implement.
// It contains only one single method, `Run`.
type CronJob interface {
Run(input CronJobInput)
}
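// Illustrative sketch, not part of the original file: a minimal CronJob
// implementation. Because the scheduler gob-encodes the CronJob value into
// RawJob.SerializedJob, a concrete type like this should carry only
// gob-encodable (exported) fields and must be registered with gob.Register
// before jobs are stored or loaded. The name exampleCounterJob is hypothetical.
type exampleCounterJob struct {
    // Label selects which entry of OtherInputs the job reads.
    Label string
}

func (e exampleCounterJob) Run(input CronJobInput) {
    // A real job would do its unit of work here; this sketch only shows how
    // the scheduler-provided input is consumed.
    _ = input.OtherInputs[e.Label]
}

func init() {
    // Assumption: registration is done once at startup, e.g. in an init function.
    gob.Register(exampleCounterJob{})
}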
| getIdsFromJobsWithScheduleList | identifier_name |
model.go | package smallben
import (
"bytes"
"encoding/base64"
"encoding/gob"
"encoding/json"
"errors"
"github.com/robfig/cron/v3"
"time"
)
// Job is the struct used to interact with SmallBen.
type Job struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64
// SuperGroupID specifies the ID of the super group
// in which this group is contained.
SuperGroupID int64
// cronID is the ID of the cron job as assigned by the scheduler
// internally.
cronID int64
// CronExpression specifies the scheduling of the job.
CronExpression string
// paused specifies whether this job has been paused.
// Only used when returning this struct.
paused bool
// createdAt specifies when this rawJob has been created.
createdAt time.Time
// updatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
updatedAt time.Time
// Job is the real unit of work to be executed
Job CronJob
// JobInput is the additional input to pass to the inner Job.
JobInput map[string]interface{}
}
// CreatedAt returns the time when this Job has been added to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(), | run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
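// Illustrative sketch, not part of the original file: the CronExpression
// strings handled above are the ones cron.ParseStandard understands, i.e.
// standard five-field specs plus descriptors such as "@hourly" or
// "@every 90s". The resulting Schedule yields the next activation time.
func exampleParseSchedule() (time.Time, error) {
    schedule, err := cron.ParseStandard("30 3 * * 1") // 03:30 every Monday
    if err != nil {
        return time.Time{}, err
    }
    return schedule.Next(time.Now()), nil
}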
// RawJob models a raw job coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// in which this group is contained.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron job as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the JSON encoding
// of the map containing the arguments for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
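// Illustrative sketch, not part of the original file: TableName makes GORM
// persist RawJob rows in the "jobs" table. Assuming GORM v2 with a SQLite
// driver (the imports "gorm.io/gorm" and "gorm.io/driver/sqlite" are NOT part
// of this file), the mapping could be exercised roughly like this:
//
//    db, err := gorm.Open(sqlite.Open("smallben.db"), &gorm.Config{})
//    if err != nil {
//        return err
//    }
//    if err := db.AutoMigrate(&RawJob{}); err != nil { // creates/updates "jobs"
//        return err
//    }
//    var pending []RawJob
//    err = db.Where("paused = ?", false).Find(&pending).Error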
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decodes j.SerializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs that will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
}
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns an error in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to the interface value.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
// This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput JSON-encodes j.runInput.OtherInputs into j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
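// Illustrative sketch, not part of the original file: the job input travels
// through encoding/json, so every value in OtherInputs must be
// JSON-marshalable, and numbers decode back as float64 once the job is
// reloaded from the database.
func exampleInputRoundTrip() (map[string]interface{}, error) {
    original := map[string]interface{}{"retries": 3, "target": "eu-west"}
    encoded, err := json.Marshal(original)
    if err != nil {
        return nil, err
    }
    var reloaded map[string]interface{}
    if err := json.Unmarshal(encoded, &reloaded); err != nil {
        return nil, err
    }
    // reloaded["retries"] now has dynamic type float64, not int.
    return reloaded, nil
}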
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobsWithScheduleList(jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFromJobList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobList(jobs []Job) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.ID
}
return ids
}
// UpdateOption is the struct used to update
// a Job.
//
// An update on a Job consists of changing
// the schedule, by using the field `CronExpression`,
// or the Job input, by using the field `JobOtherInputs`.
//
// If none of those fields are specified, i.e., they are
// both nil, the struct is considered invalid.
type UpdateOption struct {
// JobID is the ID of the Job to update.
JobID int64
// CronExpression is the new schedule of the Job.
// If nil, it is ignored, i.e., the schedule
// is not changed.
CronExpression *string
// JobOtherInputs is the new OtherInputs of the Job.
// If nil, it is ignored, i.e.,
// the Job input is not changed.
JobOtherInputs *map[string]interface{}
}
func (u *UpdateOption) schedule() (cron.Schedule, error) {
return cron.ParseStandard(*u.CronExpression)
}
var (
// ErrUpdateOptionInvalid is returned when the fields
// of UpdateOption are invalid.
// This error is returned when the combination
// of the fields is not valid (i.e., both nil).
// For errors in the CronExpression field,
// the specific error set by the cron library is returned.
ErrUpdateOptionInvalid = errors.New("invalid option")
)
// Valid returns whether the fields in this struct
// are valid. If the struct is valid, no error
// is returned.
//
// UpdateOption is considered valid if
// at least one field between CronExpression and JobOtherInputs
// is not nil, and the cron string can be parsed.
func (u *UpdateOption) Valid() error {
if u.CronExpression == nil && u.JobOtherInputs == nil {
return ErrUpdateOptionInvalid
}
if u.CronExpression != nil {
if _, err := u.schedule(); err != nil {
return err
}
}
return nil
}
// getIdsFromUpdateScheduleList basically does schedules.map(updateOption -> updateOption.jobID)
func getIdsFromUpdateScheduleList(schedules []UpdateOption) []int64 {
ids := make([]int64, len(schedules))
for i, test := range schedules {
ids[i] = test.JobID
}
return ids
}
// CronJobInput is the input passed to the Run function.
type CronJobInput struct {
// JobID is the ID of the current job.
JobID int64
// GroupID is the GroupID of the current job.
GroupID int64
// SuperGroupID is the SuperGroupID of the current job.
SuperGroupID int64
// CronExpression is the interval of execution, as specified on job creation.
CronExpression string
// OtherInputs contains the other inputs of the job.
OtherInputs map[string]interface{}
}
// CronJob is the interface jobs have to implement.
// It contains only one single method, `Run`.
type CronJob interface {
Run(input CronJobInput)
} | },
schedule: schedule, | random_line_split |
model.go | package smallben
import (
"bytes"
"encoding/base64"
"encoding/gob"
"encoding/json"
"errors"
"github.com/robfig/cron/v3"
"time"
)
// Job is the struct used to interact with SmallBen.
type Job struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64
// SuperGroupID specifies the ID of the super group
// in which this group is contained.
SuperGroupID int64
// cronID is the ID of the cron job as assigned by the scheduler
// internally.
cronID int64
// CronExpression specifies the scheduling of the job.
CronExpression string
// paused specifies whether this job has been paused.
// Only used when returning this struct.
paused bool
// createdAt specifies when this rawJob has been created.
createdAt time.Time
// updatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
updatedAt time.Time
// Job is the real unit of work to be executed
Job CronJob
// JobInput is the additional input to pass to the inner Job.
JobInput map[string]interface{}
}
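// Illustrative sketch, not part of the original file: assembling a Job before
// handing it to the scheduler. pingJob is a hypothetical CronJob
// implementation, and the IDs, schedule and input values are made up.
type pingJob struct{}

func (p pingJob) Run(input CronJobInput) {
    // A real implementation would perform the actual work here, e.g. calling
    // the endpoint passed through OtherInputs.
    _ = input.OtherInputs["endpoint"]
}

func exampleNewJob() Job {
    return Job{
        ID:             1,
        GroupID:        10,
        SuperGroupID:   100,
        CronExpression: "@every 5m",
        Job:            pingJob{},
        JobInput: map[string]interface{}{
            "endpoint": "https://example.org/ping",
        },
    }
}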
// CreatedAt returns the time when this Job has been added to the scheduler.
func (j *Job) CreatedAt() time.Time {
return j.createdAt
}
// UpdatedAt returns the last time this Job has been updated, i.e.,
// paused, resumed, schedule changed.
func (j *Job) UpdatedAt() time.Time {
return j.updatedAt
}
// Paused returns whether this Job is currently paused
// or not.
func (j *Job) Paused() bool {
return j.paused
}
// toJobWithSchedule converts Job to a JobWithSchedule object.
// It returns an error in case the parsing of the cron expression fails.
func (j *Job) toJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: 0,
CronExpression: j.CronExpression,
Paused: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
schedule: schedule,
run: j.Job,
runInput: CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
OtherInputs: j.JobInput,
CronExpression: j.CronExpression,
},
}
return result, nil
}
// RawJob models a raw job coming from the database.
type RawJob struct {
// ID is a unique ID identifying the rawJob object.
// It is chosen by the user.
ID int64 `gorm:"primaryKey,column:id"`
// GroupID is the ID of the group this rawJob is inserted in.
GroupID int64 `gorm:"column:group_id"`
// SuperGroupID specifies the ID of the super group
// in which this group is contained.
SuperGroupID int64 `gorm:"column:super_group_id"`
// CronID is the ID of the cron job as assigned by the scheduler
// internally.
CronID int64 `gorm:"column:cron_id"`
// CronExpression specifies the scheduling of the job.
CronExpression string `gorm:"column:cron_expression"`
// Paused specifies whether this rawJob has been paused.
Paused bool `gorm:"column:paused"`
// CreatedAt specifies when this rawJob has been created.
CreatedAt time.Time `gorm:"column:created_at"`
// UpdatedAt specifies the last time this object has been updated,
// i.e., paused/resumed/schedule updated.
UpdatedAt time.Time `gorm:"column:updated_at"`
// SerializedJob is the base64(gob-encoded byte array)
// of the interface executing this rawJob
SerializedJob string `gorm:"column:serialized_job"`
// SerializedJobInput is the JSON encoding
// of the map containing the arguments for the job.
SerializedJobInput string `gorm:"column:serialized_job_input"`
}
func (j *RawJob) TableName() string {
return "jobs"
}
// JobWithSchedule is a RawJob object
// with a cron.Schedule object in it.
// The schedule can be accessed by using the Schedule() method.
// This object should be created only by calling the method
// toJobWithSchedule().
type JobWithSchedule struct {
rawJob RawJob
schedule cron.Schedule
run CronJob
runInput CronJobInput
}
// decodeSerializedFields decodes j.SerializedJob and j.SerializedJobInput.
func (j *RawJob) decodeSerializedFields() (CronJob, CronJobInput, error) {
var decoder *gob.Decoder
var err error
// decode from base64 the serialized job
decodedJob, err := base64.StdEncoding.DecodeString(j.SerializedJob)
if err != nil {
return nil, CronJobInput{}, err
}
// decode the interface executing the rawJob
decoder = gob.NewDecoder(bytes.NewBuffer(decodedJob))
var runJob CronJob
if err = decoder.Decode(&runJob); err != nil {
return nil, CronJobInput{}, err
}
// decode the input from json
var jobInputMap map[string]interface{}
if err := json.Unmarshal([]byte(j.SerializedJobInput), &jobInputMap); err != nil {
return nil, CronJobInput{}, err
}
// and build the overall object containing all the
// inputs that will be passed to the Job
runJobInput := CronJobInput{
JobID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronExpression: j.CronExpression,
OtherInputs: jobInputMap,
}
return runJob, runJobInput, nil
}
// toJob converts j to a Job instance.
func (j *RawJob) toJob() (Job, error) |
// ToJobWithSchedule returns a JobWithSchedule object from the current RawJob,
// by copy. It returns an error in case the given schedule is not valid,
// or in case the conversion of the rawJob interface/input fails.
// It does NOT copy the byte arrays from j.
func (j *RawJob) ToJobWithSchedule() (JobWithSchedule, error) {
var result JobWithSchedule
// decode the schedule
schedule, err := cron.ParseStandard(j.CronExpression)
if err != nil {
return result, err
}
runJob, runJobInput, err := j.decodeSerializedFields()
if err != nil {
return result, err
}
result = JobWithSchedule{
rawJob: RawJob{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
CronID: j.CronID,
CronExpression: j.CronExpression,
Paused: j.Paused,
CreatedAt: j.CreatedAt,
UpdatedAt: j.UpdatedAt,
},
schedule: schedule,
run: runJob,
runInput: runJobInput,
}
return result, nil
}
// encodeJob encodes `job`. A separate function is needed because we need to pass
// a POINTER to the interface value.
func encodeJob(encoder *gob.Encoder, job CronJob) error {
return encoder.Encode(&job)
}
// BuildJob builds the raw version of the inner job, by encoding
// it. In particular, the encoding is done as follows:
// - for the serialized job, it is encoded in Gob and then in base64
// - for the job input, it is encoded in json.
// This is needed since, when converting from a `RawJob` to a `JobWithSchedule`,
// the binary serialization of the Job is not kept in memory.
func (j *JobWithSchedule) BuildJob() (RawJob, error) {
var bufferJob bytes.Buffer
encoderJob := gob.NewEncoder(&bufferJob)
// encode the CronJob interface keeping the unit of work
// to execute. We need to use the encodeJob method
// due to how gob interface encoding works.
if err := encodeJob(encoderJob, j.run); err != nil {
return RawJob{}, err
}
// finally, encode the bytes to base64
j.rawJob.SerializedJob = base64.StdEncoding.EncodeToString(bufferJob.Bytes())
// now, encode the job input
if err := j.encodeJobInput(); err != nil {
return RawJob{}, err
}
return j.rawJob, nil
}
// encodeJobInput JSON-encodes j.runInput.OtherInputs into j.rawJob.SerializedJobInput.
func (j *JobWithSchedule) encodeJobInput() error {
encodedInput, err := json.Marshal(j.runInput.OtherInputs)
if err != nil {
return err
}
j.rawJob.SerializedJobInput = string(encodedInput)
return nil
}
// getIdsFromJobRawList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobRawList(jobs []RawJob) []int64 {
ids := make([]int64, len(jobs))
for i, test := range jobs {
ids[i] = test.ID
}
return ids
}
// getIdsFromJobsWithScheduleList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobsWithScheduleList(jobs []JobWithSchedule) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.rawJob.ID
}
return ids
}
// getIdsFromJobList basically does jobs.map(rawJob -> rawJob.id)
func getIdsFromJobList(jobs []Job) []int64 {
ids := make([]int64, len(jobs))
for i, job := range jobs {
ids[i] = job.ID
}
return ids
}
// UpdateOption is the struct used to update
// a Job.
//
// An update on a Job consists of changing
// the schedule, by using the field `CronExpression`,
// or the Job input, by using the field `JobOtherInputs`.
//
// If none of those fields are specified, i.e., they are
// both nil, the struct is considered invalid.
type UpdateOption struct {
// JobID is the ID of the Job to update.
JobID int64
// CronExpression is the new schedule of the Job.
// If nil, it is ignored, i.e., the schedule
// is not changed.
CronExpression *string
// JobOtherInputs is the new OtherInputs of the Job.
// If nil, it is ignored, i.e.,
// the Job input is not changed.
JobOtherInputs *map[string]interface{}
}
func (u *UpdateOption) schedule() (cron.Schedule, error) {
return cron.ParseStandard(*u.CronExpression)
}
var (
// ErrUpdateOptionInvalid is returned when the fields
// of UpdateOption are invalid.
// This error is returned when the combination
// of the fields is not valid (i.e., both nil).
// For errors in the CronExpression field,
// the specific error set by the cron library is returned.
ErrUpdateOptionInvalid = errors.New("invalid option")
)
// Valid returns whether the fields in this struct
// are valid. If the struct is valid, no error
// is returned.
//
// UpdateOption is considered valid if
// at least one field between CronExpression and JobOtherInputs
// is not nil, and the cron string can be parsed.
func (u *UpdateOption) Valid() error {
if u.CronExpression == nil && u.JobOtherInputs == nil {
return ErrUpdateOptionInvalid
}
if u.CronExpression != nil {
if _, err := u.schedule(); err != nil {
return err
}
}
return nil
}
// getIdsFromUpdateScheduleList basically does schedules.map(updateOption -> updateOption.jobID)
func getIdsFromUpdateScheduleList(schedules []UpdateOption) []int64 {
ids := make([]int64, len(schedules))
for i, test := range schedules {
ids[i] = test.JobID
}
return ids
}
// CronJobInput is the input passed to the Run function.
type CronJobInput struct {
// JobID is the ID of the current job.
JobID int64
// GroupID is the GroupID of the current job.
GroupID int64
// SuperGroupID is the SuperGroupID of the current job.
SuperGroupID int64
// CronExpression is the interval of execution, as specified on job creation.
CronExpression string
// OtherInputs contains the other inputs of the job.
OtherInputs map[string]interface{}
}
// CronJob is the interface jobs have to implement.
// It contains only one single method, `Run`.
type CronJob interface {
Run(input CronJobInput)
}
| {
job, jobInput, err := j.decodeSerializedFields()
if err != nil {
return Job{}, err
}
result := Job{
ID: j.ID,
GroupID: j.GroupID,
SuperGroupID: j.SuperGroupID,
cronID: j.CronID,
CronExpression: j.CronExpression,
paused: j.Paused,
createdAt: j.CreatedAt,
updatedAt: j.UpdatedAt,
Job: job,
JobInput: jobInput.OtherInputs,
}
return result, nil
} | identifier_body |