Column      Type           Stats
file_name   large_string   lengths 4 to 140
prefix      large_string   lengths 0 to 39k
suffix      large_string   lengths 0 to 36.1k
middle      large_string   lengths 0 to 29.4k
fim_type    large_string   4 classes

Each example below lists these five fields in this order: file_name, prefix, suffix, middle, fim_type.
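Each row stores one source file split for fill-in-the-middle training: the prefix and suffix are shown to the model, the middle is the span to be reproduced, and fim_type labels the syntactic role of that span. A minimal Python sketch of how a row reassembles into the original file (the dict below is a made-up toy row, not taken from the data):

# Hypothetical toy row; the real rows below hold complete files (lib.rs, meanS1.py).
row = {
    "file_name": "toy.py",
    "prefix": "def add(a, b):\n",
    "middle": "    return a + b\n",
    "suffix": "\nprint(add(2, 3))\n",
    "fim_type": "identifier_body",  # the masked span is a function body
}

# Concatenating prefix + middle + suffix recovers the original file text.
source = row["prefix"] + row["middle"] + row["suffix"]
assert row["middle"] in source
print(source)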
lib.rs
//! # docker-bisect //! `docker-bisect` create assumes that the docker daemon is running and that you have a //! docker image with cached layers to probe. extern crate colored; extern crate dockworker; extern crate indicatif; extern crate rand; use std::clone::Clone; use std::fmt; use std::io::{prelude::*, Error, ErrorKind}; use std::sync::Arc; use std::thread; use std::time::{Duration, SystemTime}; use colored::*; use dockworker::*; use indicatif::ProgressBar; use rand::Rng; /// Truncates a string to a single line with a max width /// and removes docker prefixes. /// /// # Example /// ``` /// use docker_bisect::truncate; /// let line = "blar #(nop) real command\n line 2"; /// assert_eq!("real com", truncate(&line, 8)); /// ``` pub fn truncate(mut s: &str, max_chars: usize) -> &str { s = s.lines().next().expect("nothing to truncate"); if s.contains("#(nop) ") { let mut splat = s.split(" #(nop) "); let _ = splat.next(); s = splat.next().expect("#(nop) with no command in."); s = s.trim(); } match s.char_indices().nth(max_chars) { None => s, Some((idx, _)) => &s[..idx], } } /// A layer in a docker image. (A layer is a set of files changed due to the previous command). #[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)] pub struct Layer { pub height: usize, pub image_name: String, pub creation_command: String, } impl fmt::Display for Layer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} | {:?}", self.image_name, self.creation_command) } } /// The stderr/stdout of running the command on a container made of this layer /// (on top of all earlier layers). If command hit the timeout the result may be truncated or empty. #[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)] pub struct LayerResult { pub layer: Layer, pub result: String, } impl fmt::Display for LayerResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} | {}", self.layer, self.result) } } /// A Transition is the LayerResult of running the command on the lower layer /// and of running the command on the higher layer. No-op transitions are not recorded. #[derive(Debug, Eq, Ord, PartialOrd, PartialEq)] pub struct Transition { pub before: Option<LayerResult>, pub after: LayerResult, } impl fmt::Display for Transition { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match &self.before { Some(be) => write!(f, "({} -> {})", be, self.after), None => write!(f, "-> {}", self.after), } } } /// Starts the bisect operation. Calculates highest and lowest layer result and if they have /// different outputs it starts a binary chop to figure out which layer(s) caused the change. 
fn get_changes<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error> where T: ContainerAction + 'static, { let first_layer = layers.first().expect("no first layer"); let last_layer = layers.last().expect("no last layer"); let first_image_name: String = first_layer.image_name.clone(); let last_image_name = &last_layer.image_name; let action_c = action.clone(); let left_handle = thread::spawn(move || action_c.try_container(&first_image_name)); let end = action.try_container(last_image_name); let start = left_handle.join().expect("first layer execution error!"); if start == end { return Ok(vec![Transition { before: None, after: LayerResult { layer: last_layer.clone(), result: start, }, }]); } bisect( Vec::from(&layers[1..layers.len() - 1]), LayerResult { layer: first_layer.clone(), result: start, }, LayerResult { layer: last_layer.clone(), result: end, }, action, ) } fn bisect<T>( history: Vec<Layer>, start: LayerResult, end: LayerResult, action: &T, ) -> Result<Vec<Transition>, Error> where T: ContainerAction + 'static, { let size = history.len(); if size == 0 { if start.result == end.result { return Err(Error::new(std::io::ErrorKind::Other, "")); } return Ok(vec![Transition { before: Some(start.clone()), after: end.clone(), }]); } let half = size / 2; let mid_result = LayerResult { layer: history[half].clone(), result: action.try_container(&history[half].image_name), }; if size == 1 { let mut results = Vec::<Transition>::new(); if *start.result != mid_result.result { results.push(Transition { before: Some(start.clone()), after: mid_result.clone(), }); } if mid_result.result != *end.result { results.push(Transition { before: Some(mid_result), after: end.clone(), }); } return Ok(results); } if start.result == mid_result.result { action.skip((mid_result.layer.height - start.layer.height) as u64); return bisect(Vec::from(&history[half + 1..]), mid_result, end, action); } if mid_result.result == end.result { action.skip((end.layer.height - mid_result.layer.height) as u64); return bisect(Vec::from(&history[..half]), start, mid_result, action); } let clone_a = action.clone(); let clone_b = action.clone(); let mid_result_c = mid_result.clone(); let hist_a = Vec::from(&history[..half]); let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a)); let right_handle = thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b)); let mut left_results: Vec<Transition> = left_handle .join() .expect("left") .expect("left transition err"); let right_results: Vec<Transition> = right_handle .join() .expect("right") .expect("right transition err"); left_results.extend(right_results); // These results are sorted later... 
Ok(left_results) } trait ContainerAction: Clone + Send { fn try_container(&self, container_id: &str) -> String; fn skip(&self, count: u64) -> (); } #[derive(Clone)] struct DockerContainer { pb: Arc<ProgressBar>, command_line: Vec<String>, timeout_in_seconds: usize, } impl DockerContainer { fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer { let pb = Arc::new(ProgressBar::new(total)); DockerContainer { pb, command_line, timeout_in_seconds, } } } struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize } impl<'a> Drop for Guard<'a> { fn drop(&mut self) { unsafe { self.buf.set_len(self.len); } } } impl ContainerAction for DockerContainer { fn try_container(&self, container_id: &str) -> String { let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?"); let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string(); //Create container let mut create = ContainerCreateOptions::new(&container_id); let mut host_config = ContainerHostConfig::new(); host_config.auto_remove(false); create.host_config(host_config); let it = self.command_line.iter(); for command in it { create.cmd(command.clone()); } let container: CreateContainerResponse = docker .create_container(Some(&container_name), &create) .expect("couldn't create container"); let result = docker.start_container(&container.id); if result.is_err() { let err: dockworker::errors::Error = result.unwrap_err(); return format!("{}", err); } let log_options = ContainerLogOptions { stdout: true, stderr: true, since: None, timestamps: None, tail: None, follow: true, }; let timeout = Duration::from_secs(self.timeout_in_seconds as u64); let mut container_output = String::new(); let now = SystemTime::now(); let timeout_time = now + timeout; let result = docker.log_container(&container_name, &log_options); if let Ok(result) = result { let mut r = result; let reservation_size = 32; let mut buf = Vec::<u8>::new(); { let mut g = Guard { len: buf.len(), buf: &mut buf }; loop { if g.len == g.buf.len()
match r.read(&mut g.buf[g.len..]) { Ok(0) => { break; } Ok(n) => g.len += n, Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(_e) => { break; } } if SystemTime::now() > timeout_time { break; } } } container_output = String::from_utf8_lossy(&buf).to_string(); } self.pb.inc(1); let _stop_result = docker.stop_container(&container.id, timeout); container_output } fn skip(&self, count: u64) -> () { self.pb.inc(count); } } /// Struct to hold parameters. pub struct BisectOptions { pub timeout_in_seconds: usize, pub trunc_size: usize, } /// Create containers based on layers and run command_line against them. /// Result is the differences in std out and std err. pub fn try_bisect( histories: &Vec<ImageLayer>, command_line: Vec<String>, options: BisectOptions, ) -> Result<Vec<Transition>, Error> { println!( "\n{}\n\n{:?}\n", "Command to apply to layers:".bold(), &command_line ); let create_and_try_container = DockerContainer::new( histories.len() as u64, command_line, options.timeout_in_seconds, ); println!("{}", "Skipped missing layers:".bold()); println!(); let mut layers = Vec::new(); for (index, event) in histories.iter().rev().enumerate() { let mut created = event.created_by.clone(); created = truncate(&created, options.trunc_size).to_string(); match event.id.clone() { Some(layer_name) => layers.push(Layer { height: index, image_name: layer_name, creation_command: event.created_by.clone(), }), None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)), } } println!(); println!( "{}", "Bisecting found layers (running command on the layers) ==>\n".bold() ); if layers.len() < 2 { println!(); eprintln!( "{} layers found in cache - not enough layers to bisect.", layers.len() ); return Err(Error::new( std::io::ErrorKind::Other, "no cached layers found!", )); } let results = get_changes(layers, &create_and_try_container); create_and_try_container.pb.finish_with_message("done"); results } #[cfg(test)] mod tests { use super::*; use std::collections::HashMap; #[derive(Clone)] struct MapAction { map: HashMap<String, String>, } impl MapAction { fn new(from: Vec<usize>, to: Vec<&str>) -> Self { let mut object = MapAction { map: HashMap::new(), }; for (f, t) in from.iter().zip(to.iter()) { object.map.insert(f.to_string(), t.to_string()); } object } } impl ContainerAction for MapAction { fn try_container(&self, container_id: &str) -> String { let none = String::new(); let result: &String = self.map.get(container_id).unwrap_or(&none); result.clone() } fn skip(&self, _count: u64) -> () {} } fn lay(id: usize) -> Layer { Layer { height: id, image_name: id.to_string(), creation_command: id.to_string(), } } #[test] fn if_output_always_same_return_earliest_command() { let results = get_changes( vec![lay(1), lay(2), lay(3)], &MapAction::new(vec![1, 2, 3], vec!["A", "A", "A"]), ); assert_eq!( results.unwrap(), vec![Transition { before: None, after: LayerResult { layer: lay(3), result: "A".to_string() }, }] ); } #[test] fn if_one_difference_show_command_that_made_difference() { let results = get_changes( vec![lay(1), lay(2), lay(3)], &MapAction::new(vec![1, 2, 3], vec!["A", "A", "B"]), ); assert_eq!( results.unwrap(), vec![Transition { before: Some(LayerResult { layer: lay(2), result: "A".to_string() }), after: LayerResult { layer: lay(3), result: "B".to_string() }, }] ); } #[test] fn if_two_differences_show_two_commands_that_made_difference() { let results = get_changes( vec![lay(1), lay(2), lay(3), lay(4)], &MapAction::new(vec![1, 2, 3, 4], vec!["A", "B", "B", "C"]), ); let res = 
results.unwrap(); assert_eq!( res, vec![ Transition { before: Some(LayerResult { layer: lay(1), result: "A".to_string() }), after: LayerResult { layer: lay(2), result: "B".to_string() }, }, Transition { before: Some(LayerResult { layer: lay(3), result: "B".to_string() }), after: LayerResult { layer: lay(4), result: "C".to_string() }, } ] ); } #[test] fn three_transitions() { let results = get_changes( vec![ lay(1), lay(2), lay(3), lay(4), lay(5), lay(6), lay(7), lay(8), lay(9), lay(10), ], &MapAction::new( vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10], vec!["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"], ), ); let res = results.unwrap(); assert_eq!( res, vec![ Transition { before: Some(LayerResult { layer: lay(1), result: "A".to_string() }), after: LayerResult { layer: lay(2), result: "B".to_string() }, }, Transition { before: Some(LayerResult { layer: lay(3), result: "B".to_string() }), after: LayerResult { layer: lay(4), result: "C".to_string() }, }, Transition { before: Some(LayerResult { layer: lay(8), result: "C".to_string() }), after: LayerResult { layer: lay(9), result: "D".to_string() }, } ] ); } }
{ g.buf.resize(g.len + reservation_size, 0); }
conditional_block
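The lib.rs row above centers on get_changes/bisect: run the command on the lowest and highest cached layer, and if their outputs differ, binary-chop the layers in between to find every layer whose output differs from its predecessor. A toy Python re-sketch of that recursion (illustration only, not the crate's Rust API; outputs is a precomputed toy list):

# Toy re-sketch of the binary-chop idea from lib.rs.
# outputs[i] is the command output on layer i; returns (before, after) index pairs.
def find_transitions(outputs, lo, hi):
    if outputs[lo] == outputs[hi]:
        return []                 # endpoints agree: nothing changed in this span
    if hi - lo == 1:
        return [(lo, hi)]         # adjacent layers with different output
    mid = (lo + hi) // 2
    # Recurse into both halves; a half is pruned at the top of its call
    # whenever its endpoints already agree.
    return find_transitions(outputs, lo, mid) + find_transitions(outputs, mid, hi)

outputs = ["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"]
print(find_transitions(outputs, 0, len(outputs) - 1))
# -> [(0, 1), (2, 3), (7, 8)]  (0-based; matches the three_transitions test in lib.rs)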
lib.rs
//! # docker-bisect //! `docker-bisect` create assumes that the docker daemon is running and that you have a //! docker image with cached layers to probe. extern crate colored; extern crate dockworker; extern crate indicatif; extern crate rand; use std::clone::Clone; use std::fmt; use std::io::{prelude::*, Error, ErrorKind}; use std::sync::Arc; use std::thread; use std::time::{Duration, SystemTime}; use colored::*; use dockworker::*; use indicatif::ProgressBar; use rand::Rng; /// Truncates a string to a single line with a max width /// and removes docker prefixes. /// /// # Example /// ``` /// use docker_bisect::truncate; /// let line = "blar #(nop) real command\n line 2"; /// assert_eq!("real com", truncate(&line, 8)); /// ``` pub fn truncate(mut s: &str, max_chars: usize) -> &str { s = s.lines().next().expect("nothing to truncate"); if s.contains("#(nop) ") { let mut splat = s.split(" #(nop) "); let _ = splat.next(); s = splat.next().expect("#(nop) with no command in."); s = s.trim(); } match s.char_indices().nth(max_chars) { None => s, Some((idx, _)) => &s[..idx], } } /// A layer in a docker image. (A layer is a set of files changed due to the previous command). #[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)] pub struct Layer { pub height: usize, pub image_name: String, pub creation_command: String, } impl fmt::Display for Layer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} | {:?}", self.image_name, self.creation_command) } } /// The stderr/stdout of running the command on a container made of this layer /// (on top of all earlier layers). If command hit the timeout the result may be truncated or empty. #[derive(Debug, Clone, Eq, Ord, PartialOrd, PartialEq)] pub struct LayerResult { pub layer: Layer, pub result: String, } impl fmt::Display for LayerResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} | {}", self.layer, self.result) } } /// A Transition is the LayerResult of running the command on the lower layer /// and of running the command on the higher layer. No-op transitions are not recorded. #[derive(Debug, Eq, Ord, PartialOrd, PartialEq)] pub struct Transition { pub before: Option<LayerResult>, pub after: LayerResult, } impl fmt::Display for Transition { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match &self.before { Some(be) => write!(f, "({} -> {})", be, self.after), None => write!(f, "-> {}", self.after), } } } /// Starts the bisect operation. Calculates highest and lowest layer result and if they have /// different outputs it starts a binary chop to figure out which layer(s) caused the change. 
fn get_changes<T>(layers: Vec<Layer>, action: &T) -> Result<Vec<Transition>, Error> where T: ContainerAction + 'static, { let first_layer = layers.first().expect("no first layer"); let last_layer = layers.last().expect("no last layer"); let first_image_name: String = first_layer.image_name.clone(); let last_image_name = &last_layer.image_name; let action_c = action.clone(); let left_handle = thread::spawn(move || action_c.try_container(&first_image_name)); let end = action.try_container(last_image_name); let start = left_handle.join().expect("first layer execution error!"); if start == end { return Ok(vec![Transition { before: None, after: LayerResult { layer: last_layer.clone(), result: start, }, }]); } bisect( Vec::from(&layers[1..layers.len() - 1]), LayerResult { layer: first_layer.clone(), result: start, }, LayerResult { layer: last_layer.clone(), result: end, }, action, ) } fn bisect<T>( history: Vec<Layer>, start: LayerResult, end: LayerResult, action: &T, ) -> Result<Vec<Transition>, Error> where T: ContainerAction + 'static, { let size = history.len(); if size == 0 { if start.result == end.result { return Err(Error::new(std::io::ErrorKind::Other, "")); } return Ok(vec![Transition { before: Some(start.clone()), after: end.clone(), }]); } let half = size / 2; let mid_result = LayerResult { layer: history[half].clone(), result: action.try_container(&history[half].image_name), }; if size == 1 { let mut results = Vec::<Transition>::new(); if *start.result != mid_result.result { results.push(Transition { before: Some(start.clone()), after: mid_result.clone(), }); } if mid_result.result != *end.result { results.push(Transition { before: Some(mid_result), after: end.clone(), }); } return Ok(results); } if start.result == mid_result.result { action.skip((mid_result.layer.height - start.layer.height) as u64); return bisect(Vec::from(&history[half + 1..]), mid_result, end, action); } if mid_result.result == end.result { action.skip((end.layer.height - mid_result.layer.height) as u64); return bisect(Vec::from(&history[..half]), start, mid_result, action); } let clone_a = action.clone(); let clone_b = action.clone(); let mid_result_c = mid_result.clone(); let hist_a = Vec::from(&history[..half]); let left_handle = thread::spawn(move || bisect(hist_a, start, mid_result, &clone_a)); let right_handle = thread::spawn(move || bisect(Vec::from(&history[half + 1..]), mid_result_c, end, &clone_b)); let mut left_results: Vec<Transition> = left_handle .join() .expect("left") .expect("left transition err"); let right_results: Vec<Transition> = right_handle .join() .expect("right") .expect("right transition err"); left_results.extend(right_results); // These results are sorted later... 
Ok(left_results) } trait ContainerAction: Clone + Send { fn try_container(&self, container_id: &str) -> String; fn skip(&self, count: u64) -> (); } #[derive(Clone)] struct DockerContainer { pb: Arc<ProgressBar>, command_line: Vec<String>, timeout_in_seconds: usize, } impl DockerContainer { fn new(total: u64, command_line: Vec<String>, timeout_in_seconds: usize) -> DockerContainer { let pb = Arc::new(ProgressBar::new(total)); DockerContainer { pb, command_line, timeout_in_seconds, } } } struct Guard<'a> { buf: &'a mut Vec<u8>, len: usize } impl<'a> Drop for Guard<'a> { fn drop(&mut self) { unsafe { self.buf.set_len(self.len); } } } impl ContainerAction for DockerContainer { fn try_container(&self, container_id: &str) -> String { let docker: Docker = Docker::connect_with_defaults().expect("docker daemon running?"); let container_name: String = rand::thread_rng().gen_range(0., 1.3e4).to_string(); //Create container let mut create = ContainerCreateOptions::new(&container_id); let mut host_config = ContainerHostConfig::new(); host_config.auto_remove(false); create.host_config(host_config); let it = self.command_line.iter(); for command in it { create.cmd(command.clone()); } let container: CreateContainerResponse = docker .create_container(Some(&container_name), &create) .expect("couldn't create container"); let result = docker.start_container(&container.id); if result.is_err() { let err: dockworker::errors::Error = result.unwrap_err(); return format!("{}", err); } let log_options = ContainerLogOptions { stdout: true, stderr: true, since: None, timestamps: None, tail: None, follow: true, }; let timeout = Duration::from_secs(self.timeout_in_seconds as u64); let mut container_output = String::new(); let now = SystemTime::now(); let timeout_time = now + timeout; let result = docker.log_container(&container_name, &log_options); if let Ok(result) = result { let mut r = result; let reservation_size = 32; let mut buf = Vec::<u8>::new(); { let mut g = Guard { len: buf.len(), buf: &mut buf }; loop { if g.len == g.buf.len() { g.buf.resize(g.len + reservation_size, 0); } match r.read(&mut g.buf[g.len..]) { Ok(0) => { break; } Ok(n) => g.len += n, Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(_e) => { break; } } if SystemTime::now() > timeout_time { break; } } } container_output = String::from_utf8_lossy(&buf).to_string(); } self.pb.inc(1); let _stop_result = docker.stop_container(&container.id, timeout); container_output } fn skip(&self, count: u64) -> () { self.pb.inc(count); } } /// Struct to hold parameters. pub struct BisectOptions { pub timeout_in_seconds: usize, pub trunc_size: usize, } /// Create containers based on layers and run command_line against them. /// Result is the differences in std out and std err. 
pub fn try_bisect( histories: &Vec<ImageLayer>, command_line: Vec<String>, options: BisectOptions, ) -> Result<Vec<Transition>, Error> { println!( "\n{}\n\n{:?}\n", "Command to apply to layers:".bold(), &command_line ); let create_and_try_container = DockerContainer::new( histories.len() as u64, command_line, options.timeout_in_seconds, ); println!("{}", "Skipped missing layers:".bold()); println!(); let mut layers = Vec::new(); for (index, event) in histories.iter().rev().enumerate() { let mut created = event.created_by.clone(); created = truncate(&created, options.trunc_size).to_string(); match event.id.clone() { Some(layer_name) => layers.push(Layer { height: index, image_name: layer_name, creation_command: event.created_by.clone(), }), None => println!("{:<3}: {}.", index, truncate(&created, options.trunc_size)), } } println!(); println!( "{}", "Bisecting found layers (running command on the layers) ==>\n".bold() ); if layers.len() < 2 { println!(); eprintln!( "{} layers found in cache - not enough layers to bisect.", layers.len() ); return Err(Error::new( std::io::ErrorKind::Other, "no cached layers found!", )); } let results = get_changes(layers, &create_and_try_container); create_and_try_container.pb.finish_with_message("done"); results } #[cfg(test)] mod tests { use super::*; use std::collections::HashMap; #[derive(Clone)] struct MapAction { map: HashMap<String, String>, } impl MapAction { fn new(from: Vec<usize>, to: Vec<&str>) -> Self { let mut object = MapAction { map: HashMap::new(), }; for (f, t) in from.iter().zip(to.iter()) { object.map.insert(f.to_string(), t.to_string()); } object } } impl ContainerAction for MapAction { fn try_container(&self, container_id: &str) -> String { let none = String::new(); let result: &String = self.map.get(container_id).unwrap_or(&none); result.clone() } fn skip(&self, _count: u64) -> ()
} fn lay(id: usize) -> Layer { Layer { height: id, image_name: id.to_string(), creation_command: id.to_string(), } } #[test] fn if_output_always_same_return_earliest_command() { let results = get_changes( vec![lay(1), lay(2), lay(3)], &MapAction::new(vec![1, 2, 3], vec!["A", "A", "A"]), ); assert_eq!( results.unwrap(), vec![Transition { before: None, after: LayerResult { layer: lay(3), result: "A".to_string() }, }] ); } #[test] fn if_one_difference_show_command_that_made_difference() { let results = get_changes( vec![lay(1), lay(2), lay(3)], &MapAction::new(vec![1, 2, 3], vec!["A", "A", "B"]), ); assert_eq!( results.unwrap(), vec![Transition { before: Some(LayerResult { layer: lay(2), result: "A".to_string() }), after: LayerResult { layer: lay(3), result: "B".to_string() }, }] ); } #[test] fn if_two_differences_show_two_commands_that_made_difference() { let results = get_changes( vec![lay(1), lay(2), lay(3), lay(4)], &MapAction::new(vec![1, 2, 3, 4], vec!["A", "B", "B", "C"]), ); let res = results.unwrap(); assert_eq!( res, vec![ Transition { before: Some(LayerResult { layer: lay(1), result: "A".to_string() }), after: LayerResult { layer: lay(2), result: "B".to_string() }, }, Transition { before: Some(LayerResult { layer: lay(3), result: "B".to_string() }), after: LayerResult { layer: lay(4), result: "C".to_string() }, } ] ); } #[test] fn three_transitions() { let results = get_changes( vec![ lay(1), lay(2), lay(3), lay(4), lay(5), lay(6), lay(7), lay(8), lay(9), lay(10), ], &MapAction::new( vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10], vec!["A", "B", "B", "C", "C", "C", "C", "C", "D", "D"], ), ); let res = results.unwrap(); assert_eq!( res, vec![ Transition { before: Some(LayerResult { layer: lay(1), result: "A".to_string() }), after: LayerResult { layer: lay(2), result: "B".to_string() }, }, Transition { before: Some(LayerResult { layer: lay(3), result: "B".to_string() }), after: LayerResult { layer: lay(4), result: "C".to_string() }, }, Transition { before: Some(LayerResult { layer: lay(8), result: "C".to_string() }), after: LayerResult { layer: lay(9), result: "D".to_string() }, } ] ); } }
{}
identifier_body
meanS1.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue Dec 3 17:41:11 2019 @author: catherine """ import numpy as np #from sklearn.model_selection import train_test_split import random from interval import Interval #m is the number of anchors #p is the dimension of every X_ti #Theta is a low rank matrix of size m by p #R is the rank of Theta #omega is the covariance matrix of X #T is the sample size #signal is the coefficient for error term #mu is the mean for error term #sigma is the variance for error term def simu(mu1,m,p,R,omega,T,signal,mu,sigma): Theta = np.random.normal(mu1,sigma,size=(m,p)) u, s, v= np.linalg.svd(Theta, full_matrices = False) s[R:] = 0 smat = np.diag(s) Theta = np.dot(u, np.dot(smat, v)) X= np.random.multivariate_normal(np.zeros(p), omega, m*T) #X= np.random.multivariate_normal(np.zeros(m * p), omega, T) X= np.reshape(X,[T,m,p]) Xnew = np.zeros([T, m, m *p]) for j in range(T): for i in range(m): em = np.zeros(m) em[i] = 1 Xnew[j, i, :] = np.kron(X[j,i,:], np.transpose(em)) # Xnew is n, m, m*p E=np.random.normal(mu,sigma,size=(T,m)) Y=np.zeros([T,m]) for j in range(T): Y[j,:]=np.sum(np.multiply(Theta,X[j,:,:]),axis=1)+signal*E[j,:] return Xnew, Y def prsm(pX, pY, pnew, nh, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, diag): pX = pX.reshape(nh * m, pnew, order = 'F') pY = pY.reshape(nh * m, 1, order = 'F') thetay = thetaini rho = rhoini itr = 0 diff = 1 while itr <= maxiter and diff > epstol: Xinverse = np.linalg.inv((2 * np.matmul(pX.transpose(), pX)/(nh * m) + beta * np.eye(pnew))) XY = beta * thetay + rho + (2 * np.matmul(pX.transpose(), pY)/(nh * m))[:, 0] thetax = np.matmul(Xinverse, XY) rhohalf = rho - alpha * beta * (thetax - thetay) mtheta = (thetax - rhohalf).reshape(m, int(pnew/m), order = 'F') u, s, v= np.linalg.svd(mtheta, full_matrices=False) snew = np.clip(s - lambdap/beta, 0, None) thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F') rho = rhohalf - alpha * beta * (thetax - thetay) diff = np.sum(np.abs(thetax - thetay)) itr = itr + 1 #if(diag == True): # print(diff) return thetay, pX, pY, rho #cancpt is the candicate of change points #Kmax is the maximum number of segmentations #U is the optimal information criterion for different number of change points without penalty term #taumat is the location of change points def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter): Nr = Kmax - 2 npt = cancpt.shape[0] V=np.zeros((npt, npt)) V[:]=np.nan for j1 in range(npt): for j2 in range((j1+1), npt): if (j1==0) or (j1==(npt-1)): start = cancpt[j1] else: start = cancpt[j1]+1 end = cancpt[j2]+1 pX1 = X[start:end, :, :] pY1 = Y[start:end, :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) ssrl = np.matmul(error1.transpose(), error1) V[j1, j2] = ssrl Vt=V[0:(npt-1),1:npt] nw=Vt.shape[0] U = []#np.zeros(Kmax) U.append(Vt[0,nw-1]) D = Vt[:,nw-1].copy() Pos = np.zeros((nw, Nr)) Pos[:] = np.nan Pos[nw-1,:] = nw taumat = np.zeros((Nr, Nr)) taumat[:] = np.nan for k in range(Nr): for j in range(nw-1): dist = Vt[j,j:(nw-1)] + D[(j+1):nw] D[j] = np.min(dist) Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1 if k > 0: Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)] U.append(D[0]) taumat[k,range(k+1)] = Pos[0,range(k + 1)] return taumat, U def evaluate(Selected_Index,true_cpt_pos): cpt_dist=np.zeros([len(true_cpt_pos)]) sel_dist=np.zeros([len(Selected_Index)]) 
for i in range(len(true_cpt_pos)): dist=abs(Selected_Index-true_cpt_pos[i]) cpt_dist[i]=np.min(dist) cpt_dist_max=np.max(cpt_dist) for j in range(len(Selected_Index)): dist=abs(Selected_Index[j]-true_cpt_pos) sel_dist[j]=np.min(dist) sel_dist_max=np.max(sel_dist) return cpt_dist_max,sel_dist_max ###Simulation Start Itermax=10 m=8 p=8 signal=0.5 R1=3 omega = np.zeros((p, p)) for j in range(0, p): for i in range(0, p):
T1=100 T2=75 T3=150 T4=75 T5=100 nall=T1+T2+T3+T4+T5 true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1]) cpt_num=np.zeros([Itermax]) d_under=np.zeros([Itermax]) d_over=np.zeros([Itermax]) MSIC=np.zeros([Itermax]) h_selected=np.zeros([Itermax]) np.random.seed(2019) test_number=5 for Iter in range(Itermax): print(Iter) X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1) X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1) X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1) X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1) X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1) Xall=np.vstack((X1, X2,X3,X4,X5)) Yall=np.vstack((Y1, Y2,Y3,Y4,Y5)) hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5))) hn=len(hlist) MSE_test=np.zeros([hn]) test_sample=random.sample(range(0,nall),test_number) for kk in range(test_number): #X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0) X=np.delete(Xall,test_sample[kk],axis=0) Y=np.delete(Yall,test_sample[kk],axis=0) X_test=Xall[test_sample[kk],:,:] Y_test=Yall[test_sample[kk],:] for jj in range(hn): h=int(hlist[jj]) print(h) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/h)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(h+1, n - h): pX1 = X[(j - h):(j + 1), :, :] pY1 = Y[(j - h):(j + 1), :] pX2 = X[(j + 1): (j + h + 1), :, :] pY2 = Y[(j + 1): (j + h + 1), :] pX = X[(j - h): (j + h + 1), :, :] pY = Y[(j - h): (j + h + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(h * m) ssrr = np.matmul(error2.transpose(), error2)/(h * m) ssr = np.matmul(error.transpose(), error)/(h * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (h + 1, n-h): if S[j] == np.max(S[(j - h):(j + h + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) #MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: #Selected_Index=np.array[(0,(n-1))] #cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) #Ymean=np.mean(Y,axis=0) theta_all, X_all, Y_all, rho_all = prsm(X, Y, pnew, Y.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_all) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) else: for i in range(len(Selected_Index)+1): left=np.hstack((0,Selected_Index)) right=np.hstack((Selected_Index,nall)) if (test_sample[kk] in Interval(left[i],right[i]))==True: Yinter=Y[left[i]:right[i],:] Xinter=X[left[i]:right[i],:,:] 
theta_interval, X_interval, Y_interval, rho_interval = prsm(Xinter, Yinter, pnew, Yinter.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_interval) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) #Ymean=np.mean(Y[left[i]:right[i],:],axis=0) #MSE_test[jj]=MSE_test[jj]+sum(pow((Y_test-Ymean),2)) print(MSE_test) position=np.where(MSE_test==np.min(MSE_test))[0] position_selected=position[len(position)-1] h_selected[Iter]=hlist[position_selected] X=Xall Y=Yall hh=int(h_selected[Iter]) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/hh)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(hh+1, n - hh): pX1 = X[(j - hh):(j + 1), :, :] pY1 = Y[(j - hh):(j + 1), :] pX2 = X[(j + 1): (j + hh + 1), :, :] pY2 = Y[(j + 1): (j + hh + 1), :] pX = X[(j - hh): (j + hh + 1), :, :] pY = Y[(j - hh): (j + hh + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(hh * m) ssrr = np.matmul(error2.transpose(), error2)/(hh * m) ssr = np.matmul(error.transpose(), error)/(hh * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (hh + 1, n-hh): if S[j] == np.max(S[(j - hh):(j + hh + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: Selected_Index=np.array[(0,(n-1))] cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) else: cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) print(cpt_dist_max) print(sel_dist_max) cpt_num[Iter]=len(Selected_Index) d_under[Iter]=sel_dist_max d_over[Iter]=cpt_dist_max np.save('MULmeancpt_num1',cpt_num) np.save('MULmeand_under1',d_under) np.save('MULmeand_over1',d_over)
omega[i, j] = 0.5**(np.abs(i - j))
conditional_block
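The conditional block filled in above, omega[i, j] = 0.5**(np.abs(i - j)), builds an AR(1)-style Toeplitz covariance for the simulated design. A small vectorized equivalent of that double loop, assuming only NumPy (the script itself uses the loop form):

import numpy as np

p = 8
idx = np.arange(p)
# Vectorized: entry (i, j) is 0.5 ** |i - j|.
omega_vec = 0.5 ** np.abs(np.subtract.outer(idx, idx))

# Same construction as the double loop in meanS1.py, for comparison.
omega_loop = np.zeros((p, p))
for j in range(p):
    for i in range(p):
        omega_loop[i, j] = 0.5 ** np.abs(i - j)

assert np.allclose(omega_vec, omega_loop)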
meanS1.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue Dec 3 17:41:11 2019 @author: catherine """ import numpy as np #from sklearn.model_selection import train_test_split import random from interval import Interval #m is the number of anchors #p is the dimension of every X_ti #Theta is a low rank matrix of size m by p #R is the rank of Theta #omega is the covariance matrix of X #T is the sample size #signal is the coefficient for error term #mu is the mean for error term #sigma is the variance for error term def simu(mu1,m,p,R,omega,T,signal,mu,sigma):
def prsm(pX, pY, pnew, nh, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, diag): pX = pX.reshape(nh * m, pnew, order = 'F') pY = pY.reshape(nh * m, 1, order = 'F') thetay = thetaini rho = rhoini itr = 0 diff = 1 while itr <= maxiter and diff > epstol: Xinverse = np.linalg.inv((2 * np.matmul(pX.transpose(), pX)/(nh * m) + beta * np.eye(pnew))) XY = beta * thetay + rho + (2 * np.matmul(pX.transpose(), pY)/(nh * m))[:, 0] thetax = np.matmul(Xinverse, XY) rhohalf = rho - alpha * beta * (thetax - thetay) mtheta = (thetax - rhohalf).reshape(m, int(pnew/m), order = 'F') u, s, v= np.linalg.svd(mtheta, full_matrices=False) snew = np.clip(s - lambdap/beta, 0, None) thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F') rho = rhohalf - alpha * beta * (thetax - thetay) diff = np.sum(np.abs(thetax - thetay)) itr = itr + 1 #if(diag == True): # print(diff) return thetay, pX, pY, rho #cancpt is the candicate of change points #Kmax is the maximum number of segmentations #U is the optimal information criterion for different number of change points without penalty term #taumat is the location of change points def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter): Nr = Kmax - 2 npt = cancpt.shape[0] V=np.zeros((npt, npt)) V[:]=np.nan for j1 in range(npt): for j2 in range((j1+1), npt): if (j1==0) or (j1==(npt-1)): start = cancpt[j1] else: start = cancpt[j1]+1 end = cancpt[j2]+1 pX1 = X[start:end, :, :] pY1 = Y[start:end, :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) ssrl = np.matmul(error1.transpose(), error1) V[j1, j2] = ssrl Vt=V[0:(npt-1),1:npt] nw=Vt.shape[0] U = []#np.zeros(Kmax) U.append(Vt[0,nw-1]) D = Vt[:,nw-1].copy() Pos = np.zeros((nw, Nr)) Pos[:] = np.nan Pos[nw-1,:] = nw taumat = np.zeros((Nr, Nr)) taumat[:] = np.nan for k in range(Nr): for j in range(nw-1): dist = Vt[j,j:(nw-1)] + D[(j+1):nw] D[j] = np.min(dist) Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1 if k > 0: Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)] U.append(D[0]) taumat[k,range(k+1)] = Pos[0,range(k + 1)] return taumat, U def evaluate(Selected_Index,true_cpt_pos): cpt_dist=np.zeros([len(true_cpt_pos)]) sel_dist=np.zeros([len(Selected_Index)]) for i in range(len(true_cpt_pos)): dist=abs(Selected_Index-true_cpt_pos[i]) cpt_dist[i]=np.min(dist) cpt_dist_max=np.max(cpt_dist) for j in range(len(Selected_Index)): dist=abs(Selected_Index[j]-true_cpt_pos) sel_dist[j]=np.min(dist) sel_dist_max=np.max(sel_dist) return cpt_dist_max,sel_dist_max ###Simulation Start Itermax=10 m=8 p=8 signal=0.5 R1=3 omega = np.zeros((p, p)) for j in range(0, p): for i in range(0, p): omega[i, j] = 0.5**(np.abs(i - j)) T1=100 T2=75 T3=150 T4=75 T5=100 nall=T1+T2+T3+T4+T5 true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1]) cpt_num=np.zeros([Itermax]) d_under=np.zeros([Itermax]) d_over=np.zeros([Itermax]) MSIC=np.zeros([Itermax]) h_selected=np.zeros([Itermax]) np.random.seed(2019) test_number=5 for Iter in range(Itermax): print(Iter) X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1) X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1) X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1) X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1) X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1) Xall=np.vstack((X1, X2,X3,X4,X5)) Yall=np.vstack((Y1, Y2,Y3,Y4,Y5)) hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5))) hn=len(hlist) 
MSE_test=np.zeros([hn]) test_sample=random.sample(range(0,nall),test_number) for kk in range(test_number): #X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0) X=np.delete(Xall,test_sample[kk],axis=0) Y=np.delete(Yall,test_sample[kk],axis=0) X_test=Xall[test_sample[kk],:,:] Y_test=Yall[test_sample[kk],:] for jj in range(hn): h=int(hlist[jj]) print(h) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/h)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(h+1, n - h): pX1 = X[(j - h):(j + 1), :, :] pY1 = Y[(j - h):(j + 1), :] pX2 = X[(j + 1): (j + h + 1), :, :] pY2 = Y[(j + 1): (j + h + 1), :] pX = X[(j - h): (j + h + 1), :, :] pY = Y[(j - h): (j + h + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(h * m) ssrr = np.matmul(error2.transpose(), error2)/(h * m) ssr = np.matmul(error.transpose(), error)/(h * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (h + 1, n-h): if S[j] == np.max(S[(j - h):(j + h + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) #MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: #Selected_Index=np.array[(0,(n-1))] #cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) #Ymean=np.mean(Y,axis=0) theta_all, X_all, Y_all, rho_all = prsm(X, Y, pnew, Y.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_all) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) else: for i in range(len(Selected_Index)+1): left=np.hstack((0,Selected_Index)) right=np.hstack((Selected_Index,nall)) if (test_sample[kk] in Interval(left[i],right[i]))==True: Yinter=Y[left[i]:right[i],:] Xinter=X[left[i]:right[i],:,:] theta_interval, X_interval, Y_interval, rho_interval = prsm(Xinter, Yinter, pnew, Yinter.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_interval) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) #Ymean=np.mean(Y[left[i]:right[i],:],axis=0) #MSE_test[jj]=MSE_test[jj]+sum(pow((Y_test-Ymean),2)) print(MSE_test) position=np.where(MSE_test==np.min(MSE_test))[0] position_selected=position[len(position)-1] h_selected[Iter]=hlist[position_selected] X=Xall Y=Yall hh=int(h_selected[Iter]) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = 
np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/hh)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(hh+1, n - hh): pX1 = X[(j - hh):(j + 1), :, :] pY1 = Y[(j - hh):(j + 1), :] pX2 = X[(j + 1): (j + hh + 1), :, :] pY2 = Y[(j + 1): (j + hh + 1), :] pX = X[(j - hh): (j + hh + 1), :, :] pY = Y[(j - hh): (j + hh + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(hh * m) ssrr = np.matmul(error2.transpose(), error2)/(hh * m) ssr = np.matmul(error.transpose(), error)/(hh * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (hh + 1, n-hh): if S[j] == np.max(S[(j - hh):(j + hh + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: Selected_Index=np.array[(0,(n-1))] cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) else: cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) print(cpt_dist_max) print(sel_dist_max) cpt_num[Iter]=len(Selected_Index) d_under[Iter]=sel_dist_max d_over[Iter]=cpt_dist_max np.save('MULmeancpt_num1',cpt_num) np.save('MULmeand_under1',d_under) np.save('MULmeand_over1',d_over)
Theta = np.random.normal(mu1,sigma,size=(m,p)) u, s, v= np.linalg.svd(Theta, full_matrices = False) s[R:] = 0 smat = np.diag(s) Theta = np.dot(u, np.dot(smat, v)) X= np.random.multivariate_normal(np.zeros(p), omega, m*T) #X= np.random.multivariate_normal(np.zeros(m * p), omega, T) X= np.reshape(X,[T,m,p]) Xnew = np.zeros([T, m, m *p]) for j in range(T): for i in range(m): em = np.zeros(m) em[i] = 1 Xnew[j, i, :] = np.kron(X[j,i,:], np.transpose(em)) # Xnew is n, m, m*p E=np.random.normal(mu,sigma,size=(T,m)) Y=np.zeros([T,m]) for j in range(T): Y[j,:]=np.sum(np.multiply(Theta,X[j,:,:]),axis=1)+signal*E[j,:] return Xnew, Y
identifier_body
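The identifier_body filled in above is the body of simu; its first step draws a random m x p matrix and projects it to rank R by zeroing all but the leading R singular values. A standalone sketch of that rank truncation, using the same np.linalg.svd call as the script (the seeded default_rng generator here is just for reproducibility of the sketch, not part of the script):

import numpy as np

m, p, R = 8, 8, 3
rng = np.random.default_rng(0)

Theta = rng.normal(size=(m, p))
u, s, v = np.linalg.svd(Theta, full_matrices=False)
s[R:] = 0                                    # keep only the leading R singular values
Theta_lowrank = u @ np.diag(s) @ v           # same as np.dot(u, np.dot(np.diag(s), v))

print(np.linalg.matrix_rank(Theta_lowrank))  # 3 with probability one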
meanS1.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue Dec 3 17:41:11 2019 @author: catherine """ import numpy as np #from sklearn.model_selection import train_test_split import random from interval import Interval #m is the number of anchors #p is the dimension of every X_ti #Theta is a low rank matrix of size m by p #R is the rank of Theta #omega is the covariance matrix of X #T is the sample size #signal is the coefficient for error term #mu is the mean for error term #sigma is the variance for error term def
(mu1,m,p,R,omega,T,signal,mu,sigma): Theta = np.random.normal(mu1,sigma,size=(m,p)) u, s, v= np.linalg.svd(Theta, full_matrices = False) s[R:] = 0 smat = np.diag(s) Theta = np.dot(u, np.dot(smat, v)) X= np.random.multivariate_normal(np.zeros(p), omega, m*T) #X= np.random.multivariate_normal(np.zeros(m * p), omega, T) X= np.reshape(X,[T,m,p]) Xnew = np.zeros([T, m, m *p]) for j in range(T): for i in range(m): em = np.zeros(m) em[i] = 1 Xnew[j, i, :] = np.kron(X[j,i,:], np.transpose(em)) # Xnew is n, m, m*p E=np.random.normal(mu,sigma,size=(T,m)) Y=np.zeros([T,m]) for j in range(T): Y[j,:]=np.sum(np.multiply(Theta,X[j,:,:]),axis=1)+signal*E[j,:] return Xnew, Y def prsm(pX, pY, pnew, nh, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, diag): pX = pX.reshape(nh * m, pnew, order = 'F') pY = pY.reshape(nh * m, 1, order = 'F') thetay = thetaini rho = rhoini itr = 0 diff = 1 while itr <= maxiter and diff > epstol: Xinverse = np.linalg.inv((2 * np.matmul(pX.transpose(), pX)/(nh * m) + beta * np.eye(pnew))) XY = beta * thetay + rho + (2 * np.matmul(pX.transpose(), pY)/(nh * m))[:, 0] thetax = np.matmul(Xinverse, XY) rhohalf = rho - alpha * beta * (thetax - thetay) mtheta = (thetax - rhohalf).reshape(m, int(pnew/m), order = 'F') u, s, v= np.linalg.svd(mtheta, full_matrices=False) snew = np.clip(s - lambdap/beta, 0, None) thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F') rho = rhohalf - alpha * beta * (thetax - thetay) diff = np.sum(np.abs(thetax - thetay)) itr = itr + 1 #if(diag == True): # print(diff) return thetay, pX, pY, rho #cancpt is the candicate of change points #Kmax is the maximum number of segmentations #U is the optimal information criterion for different number of change points without penalty term #taumat is the location of change points def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter): Nr = Kmax - 2 npt = cancpt.shape[0] V=np.zeros((npt, npt)) V[:]=np.nan for j1 in range(npt): for j2 in range((j1+1), npt): if (j1==0) or (j1==(npt-1)): start = cancpt[j1] else: start = cancpt[j1]+1 end = cancpt[j2]+1 pX1 = X[start:end, :, :] pY1 = Y[start:end, :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) ssrl = np.matmul(error1.transpose(), error1) V[j1, j2] = ssrl Vt=V[0:(npt-1),1:npt] nw=Vt.shape[0] U = []#np.zeros(Kmax) U.append(Vt[0,nw-1]) D = Vt[:,nw-1].copy() Pos = np.zeros((nw, Nr)) Pos[:] = np.nan Pos[nw-1,:] = nw taumat = np.zeros((Nr, Nr)) taumat[:] = np.nan for k in range(Nr): for j in range(nw-1): dist = Vt[j,j:(nw-1)] + D[(j+1):nw] D[j] = np.min(dist) Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1 if k > 0: Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)] U.append(D[0]) taumat[k,range(k+1)] = Pos[0,range(k + 1)] return taumat, U def evaluate(Selected_Index,true_cpt_pos): cpt_dist=np.zeros([len(true_cpt_pos)]) sel_dist=np.zeros([len(Selected_Index)]) for i in range(len(true_cpt_pos)): dist=abs(Selected_Index-true_cpt_pos[i]) cpt_dist[i]=np.min(dist) cpt_dist_max=np.max(cpt_dist) for j in range(len(Selected_Index)): dist=abs(Selected_Index[j]-true_cpt_pos) sel_dist[j]=np.min(dist) sel_dist_max=np.max(sel_dist) return cpt_dist_max,sel_dist_max ###Simulation Start Itermax=10 m=8 p=8 signal=0.5 R1=3 omega = np.zeros((p, p)) for j in range(0, p): for i in range(0, p): omega[i, j] = 0.5**(np.abs(i - j)) T1=100 T2=75 T3=150 T4=75 T5=100 nall=T1+T2+T3+T4+T5 
true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1]) cpt_num=np.zeros([Itermax]) d_under=np.zeros([Itermax]) d_over=np.zeros([Itermax]) MSIC=np.zeros([Itermax]) h_selected=np.zeros([Itermax]) np.random.seed(2019) test_number=5 for Iter in range(Itermax): print(Iter) X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1) X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1) X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1) X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1) X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1) Xall=np.vstack((X1, X2,X3,X4,X5)) Yall=np.vstack((Y1, Y2,Y3,Y4,Y5)) hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5))) hn=len(hlist) MSE_test=np.zeros([hn]) test_sample=random.sample(range(0,nall),test_number) for kk in range(test_number): #X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0) X=np.delete(Xall,test_sample[kk],axis=0) Y=np.delete(Yall,test_sample[kk],axis=0) X_test=Xall[test_sample[kk],:,:] Y_test=Yall[test_sample[kk],:] for jj in range(hn): h=int(hlist[jj]) print(h) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/h)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(h+1, n - h): pX1 = X[(j - h):(j + 1), :, :] pY1 = Y[(j - h):(j + 1), :] pX2 = X[(j + 1): (j + h + 1), :, :] pY2 = Y[(j + 1): (j + h + 1), :] pX = X[(j - h): (j + h + 1), :, :] pY = Y[(j - h): (j + h + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(h * m) ssrr = np.matmul(error2.transpose(), error2)/(h * m) ssr = np.matmul(error.transpose(), error)/(h * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (h + 1, n-h): if S[j] == np.max(S[(j - h):(j + h + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) #MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: #Selected_Index=np.array[(0,(n-1))] #cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) #Ymean=np.mean(Y,axis=0) theta_all, X_all, Y_all, rho_all = prsm(X, Y, pnew, Y.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_all) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) else: for i in range(len(Selected_Index)+1): left=np.hstack((0,Selected_Index)) right=np.hstack((Selected_Index,nall)) if (test_sample[kk] in Interval(left[i],right[i]))==True: Yinter=Y[left[i]:right[i],:] Xinter=X[left[i]:right[i],:,:] theta_interval, X_interval, Y_interval, rho_interval = 
prsm(Xinter, Yinter, pnew, Yinter.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_interval) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) #Ymean=np.mean(Y[left[i]:right[i],:],axis=0) #MSE_test[jj]=MSE_test[jj]+sum(pow((Y_test-Ymean),2)) print(MSE_test) position=np.where(MSE_test==np.min(MSE_test))[0] position_selected=position[len(position)-1] h_selected[Iter]=hlist[position_selected] X=Xall Y=Yall hh=int(h_selected[Iter]) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/hh)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(hh+1, n - hh): pX1 = X[(j - hh):(j + 1), :, :] pY1 = Y[(j - hh):(j + 1), :] pX2 = X[(j + 1): (j + hh + 1), :, :] pY2 = Y[(j + 1): (j + hh + 1), :] pX = X[(j - hh): (j + hh + 1), :, :] pY = Y[(j - hh): (j + hh + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(hh * m) ssrr = np.matmul(error2.transpose(), error2)/(hh * m) ssr = np.matmul(error.transpose(), error)/(hh * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (hh + 1, n-hh): if S[j] == np.max(S[(j - hh):(j + hh + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: Selected_Index=np.array[(0,(n-1))] cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) else: cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) print(cpt_dist_max) print(sel_dist_max) cpt_num[Iter]=len(Selected_Index) d_under[Iter]=sel_dist_max d_over[Iter]=cpt_dist_max np.save('MULmeancpt_num1',cpt_num) np.save('MULmeand_under1',d_under) np.save('MULmeand_over1',d_over)
simu
identifier_name
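Elsewhere in this file, the thetay update inside prsm reshapes the current iterate into an m x (pnew/m) matrix and shrinks its singular values by lambdap/beta; this singular-value soft-thresholding is the proximal operator of the nuclear norm and is what drives the low-rank estimate. A minimal sketch of that single step on a toy matrix, using the same np.linalg.svd and np.clip calls as the script:

import numpy as np

def svd_soft_threshold(M, tau):
    """Proximal operator of tau * nuclear norm: shrink each singular value by tau."""
    u, s, v = np.linalg.svd(M, full_matrices=False)
    s_shrunk = np.clip(s - tau, 0, None)
    return u @ np.diag(s_shrunk) @ v

M = np.arange(12, dtype=float).reshape(4, 3)
print(np.linalg.svd(M, compute_uv=False))                         # original singular values
print(np.linalg.svd(svd_soft_threshold(M, 1.0), compute_uv=False))  # each shrunk toward zero by 1.0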
meanS1.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue Dec 3 17:41:11 2019 @author: catherine """ import numpy as np #from sklearn.model_selection import train_test_split import random from interval import Interval #m is the number of anchors #p is the dimension of every X_ti #Theta is a low rank matrix of size m by p #R is the rank of Theta #omega is the covariance matrix of X #T is the sample size #signal is the coefficient for error term #mu is the mean for error term #sigma is the variance for error term def simu(mu1,m,p,R,omega,T,signal,mu,sigma): Theta = np.random.normal(mu1,sigma,size=(m,p)) u, s, v= np.linalg.svd(Theta, full_matrices = False) s[R:] = 0 smat = np.diag(s) Theta = np.dot(u, np.dot(smat, v)) X= np.random.multivariate_normal(np.zeros(p), omega, m*T) #X= np.random.multivariate_normal(np.zeros(m * p), omega, T) X= np.reshape(X,[T,m,p]) Xnew = np.zeros([T, m, m *p]) for j in range(T): for i in range(m): em = np.zeros(m) em[i] = 1 Xnew[j, i, :] = np.kron(X[j,i,:], np.transpose(em)) # Xnew is n, m, m*p E=np.random.normal(mu,sigma,size=(T,m)) Y=np.zeros([T,m]) for j in range(T): Y[j,:]=np.sum(np.multiply(Theta,X[j,:,:]),axis=1)+signal*E[j,:] return Xnew, Y def prsm(pX, pY, pnew, nh, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, diag): pX = pX.reshape(nh * m, pnew, order = 'F') pY = pY.reshape(nh * m, 1, order = 'F') thetay = thetaini rho = rhoini itr = 0 diff = 1 while itr <= maxiter and diff > epstol: Xinverse = np.linalg.inv((2 * np.matmul(pX.transpose(), pX)/(nh * m) + beta * np.eye(pnew))) XY = beta * thetay + rho + (2 * np.matmul(pX.transpose(), pY)/(nh * m))[:, 0] thetax = np.matmul(Xinverse, XY) rhohalf = rho - alpha * beta * (thetax - thetay) mtheta = (thetax - rhohalf).reshape(m, int(pnew/m), order = 'F') u, s, v= np.linalg.svd(mtheta, full_matrices=False) snew = np.clip(s - lambdap/beta, 0, None) thetay = np.reshape(np.dot(u, np.dot(np.diag(snew), v)), pnew, order = 'F') rho = rhohalf - alpha * beta * (thetax - thetay) diff = np.sum(np.abs(thetax - thetay)) itr = itr + 1 #if(diag == True): # print(diff) return thetay, pX, pY, rho #cancpt is the candicate of change points #Kmax is the maximum number of segmentations #U is the optimal information criterion for different number of change points without penalty term #taumat is the location of change points def dynProg(cancpt, Kmax, X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter): Nr = Kmax - 2 npt = cancpt.shape[0] V=np.zeros((npt, npt)) V[:]=np.nan for j1 in range(npt): for j2 in range((j1+1), npt): if (j1==0) or (j1==(npt-1)): start = cancpt[j1] else: start = cancpt[j1]+1 end = cancpt[j2]+1 pX1 = X[start:end, :, :] pY1 = Y[start:end, :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) ssrl = np.matmul(error1.transpose(), error1) V[j1, j2] = ssrl Vt=V[0:(npt-1),1:npt] nw=Vt.shape[0] U = []#np.zeros(Kmax) U.append(Vt[0,nw-1]) D = Vt[:,nw-1].copy() Pos = np.zeros((nw, Nr)) Pos[:] = np.nan Pos[nw-1,:] = nw taumat = np.zeros((Nr, Nr))
for k in range(Nr): for j in range(nw-1): dist = Vt[j,j:(nw-1)] + D[(j+1):nw] D[j] = np.min(dist) Pos[j,0] = int(np.where(dist == np.min(dist))[0][0] + j) +1 if k > 0: Pos[j,1:(k+1)] = Pos[int(Pos[j,0]),range(k)] U.append(D[0]) taumat[k,range(k+1)] = Pos[0,range(k + 1)] return taumat, U def evaluate(Selected_Index,true_cpt_pos): cpt_dist=np.zeros([len(true_cpt_pos)]) sel_dist=np.zeros([len(Selected_Index)]) for i in range(len(true_cpt_pos)): dist=abs(Selected_Index-true_cpt_pos[i]) cpt_dist[i]=np.min(dist) cpt_dist_max=np.max(cpt_dist) for j in range(len(Selected_Index)): dist=abs(Selected_Index[j]-true_cpt_pos) sel_dist[j]=np.min(dist) sel_dist_max=np.max(sel_dist) return cpt_dist_max,sel_dist_max ###Simulation Start Itermax=10 m=8 p=8 signal=0.5 R1=3 omega = np.zeros((p, p)) for j in range(0, p): for i in range(0, p): omega[i, j] = 0.5**(np.abs(i - j)) T1=100 T2=75 T3=150 T4=75 T5=100 nall=T1+T2+T3+T4+T5 true_cpt_pos=np.array([T1-1,T1+T2-1,T1+T2+T3-1,T1+T2+T3+T4-1]) cpt_num=np.zeros([Itermax]) d_under=np.zeros([Itermax]) d_over=np.zeros([Itermax]) MSIC=np.zeros([Itermax]) h_selected=np.zeros([Itermax]) np.random.seed(2019) test_number=5 for Iter in range(Itermax): print(Iter) X1,Y1=simu(-1,m,p,R1,omega,T1,signal,0,1) X2,Y2=simu(0,m,p,R1,omega,T2,signal,0,1) X3,Y3=simu(1,m,p,R1,omega,T3,signal,0,1) X4,Y4=simu(-1,m,p,R1,omega,T4,signal,0,1) X5,Y5=simu(0,m,p,R1,omega,T5,signal,0,1) Xall=np.vstack((X1, X2,X3,X4,X5)) Yall=np.vstack((Y1, Y2,Y3,Y4,Y5)) hlist=range(16*int((m/np.log(nall))**(0.5)),48*int((m/np.log(nall))**(0.5)),4*int((m/np.log(nall))**(0.5))) hn=len(hlist) MSE_test=np.zeros([hn]) test_sample=random.sample(range(0,nall),test_number) for kk in range(test_number): #X,X_test,Y,Y_test=train_test_split(Xall,Yall,test_size=0.002,random_state=0) X=np.delete(Xall,test_sample[kk],axis=0) Y=np.delete(Yall,test_sample[kk],axis=0) X_test=Xall[test_sample[kk],:,:] Y_test=Yall[test_sample[kk],:] for jj in range(hn): h=int(hlist[jj]) print(h) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/h)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(h+1, n - h): pX1 = X[(j - h):(j + 1), :, :] pY1 = Y[(j - h):(j + 1), :] pX2 = X[(j + 1): (j + h + 1), :, :] pY2 = Y[(j + 1): (j + h + 1), :] pX = X[(j - h): (j + h + 1), :, :] pY = Y[(j - h): (j + h + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(h * m) ssrr = np.matmul(error2.transpose(), error2)/(h * m) ssr = np.matmul(error.transpose(), error)/(h * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (h + 1, n-h): if S[j] == np.max(S[(j - h):(j + h + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, 
epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) #MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: #Selected_Index=np.array[(0,(n-1))] #cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) #Ymean=np.mean(Y,axis=0) theta_all, X_all, Y_all, rho_all = prsm(X, Y, pnew, Y.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_all) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) else: for i in range(len(Selected_Index)+1): left=np.hstack((0,Selected_Index)) right=np.hstack((Selected_Index,nall)) if (test_sample[kk] in Interval(left[i],right[i]))==True: Yinter=Y[left[i]:right[i],:] Xinter=X[left[i]:right[i],:,:] theta_interval, X_interval, Y_interval, rho_interval = prsm(Xinter, Yinter, pnew, Yinter.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error = Y_test - np.matmul(X_test, theta_interval) MSE_test[jj]=MSE_test[jj]+sum(pow(error,2)) #Ymean=np.mean(Y[left[i]:right[i],:],axis=0) #MSE_test[jj]=MSE_test[jj]+sum(pow((Y_test-Ymean),2)) print(MSE_test) position=np.where(MSE_test==np.min(MSE_test))[0] position_selected=position[len(position)-1] h_selected[Iter]=hlist[position_selected] X=Xall Y=Yall hh=int(h_selected[Iter]) pnew = X.shape[2] n= X.shape[0] S = np.zeros(n) Xini = X.reshape(n * m, pnew, order = 'F') Yini = Y.reshape(n * m, 1, order = 'F') XXinv = np.matmul(Xini.transpose(), Xini) XXinv = np.linalg.pinv(XXinv) thetaini = np.matmul(XXinv, np.matmul(Xini.transpose(), Yini))[:, 0] rhoini = np.zeros(pnew) lambdap = np.sqrt((m + pnew/m)/hh)* m**(-1) * 0.3 rhoini[:] = lambdap maxiter = 1000 epstol = 1e-3 alpha = 0.9 beta = 1 for j in range(hh+1, n - hh): pX1 = X[(j - hh):(j + 1), :, :] pY1 = Y[(j - hh):(j + 1), :] pX2 = X[(j + 1): (j + hh + 1), :, :] pY2 = Y[(j + 1): (j + hh + 1), :] pX = X[(j - hh): (j + hh + 1), :, :] pY = Y[(j - hh): (j + hh + 1), :] thetal, pX1, pY1, rhol = prsm(pX1, pY1, pnew, pY1.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) thetar, pX2, pY2, rhor = prsm(pX2, pY2, pnew, pY2.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) theta, pX, pY, rho = prsm(pX, pY, pnew, pY.shape[0], m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter, True) error1 = pY1[:, 0] - np.matmul(pX1, thetal) error2 = pY2[:, 0] - np.matmul(pX2, thetar) error = pY[:, 0] - np.matmul(pX, theta) ssrl = np.matmul(error1.transpose(), error1)/(hh * m) ssrr = np.matmul(error2.transpose(), error2)/(hh * m) ssr = np.matmul(error.transpose(), error)/(hh * m) S[j] = ssr - ssrl - ssrr # print(j) cpt = np.array([0]) for j in range (hh + 1, n-hh): if S[j] == np.max(S[(j - hh):(j + hh + 1)]): cpt = np.hstack((cpt, j)) cpt_new=np.hstack((cpt,(n-1))); taumat, U = dynProg(cpt_new, len(cpt_new),X, Y, pnew, m, thetaini, rhoini, alpha, beta, lambdap, epstol, maxiter) temp = np.array(U) + (np.array(range(len(U)))) * pow(n,2.0/3) MSIC[Iter]=np.min(temp) smindex = int(np.where(temp == np.min(temp))[0][0])-1 Selected_Index=cpt_new[taumat[smindex,range(smindex+1)].astype(int)] print(len(Selected_Index)) if len(Selected_Index)==0: Selected_Index=np.array[(0,(n-1))] cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) else: cpt_dist_max,sel_dist_max=evaluate(Selected_Index,true_cpt_pos) print(cpt_dist_max) print(sel_dist_max) cpt_num[Iter]=len(Selected_Index) 
d_under[Iter]=sel_dist_max d_over[Iter]=cpt_dist_max np.save('MULmeancpt_num1',cpt_num) np.save('MULmeand_under1',d_under) np.save('MULmeand_over1',d_over)
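# --- Illustrative aside (not part of the original script) -------------------
# A small sketch of the two distances saved just above, on made-up positions:
# d_over  (cpt_dist_max) = max over true change points of the distance to the
#                          nearest selected point,
# d_under (sel_dist_max) = max over selected points of the distance to the
#                          nearest true change point.
import numpy as np

true_toy = np.array([99, 174, 324, 399])       # hypothetical true change points
selected_toy = np.array([101, 170, 330])       # hypothetical detected change points

cpt_dist_toy = np.array([np.min(np.abs(selected_toy - t)) for t in true_toy])
sel_dist_toy = np.array([np.min(np.abs(s - true_toy)) for s in selected_toy])

print(cpt_dist_toy.max(), sel_dist_toy.max())  # 69 (the last true point is missed), 6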
taumat[:] = np.nan
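# --- Illustrative aside (not part of the original script) -------------------
# A toy version of the dynamic program that dynProg implements: cover the series
# with k segments whose boundaries are restricted to candidate change points,
# minimising a segment cost. A plain squared-error cost stands in for the PRSM
# fit; all data below are assumptions.
import numpy as np

y_toy = np.array([0.0, 0.1, -0.1, 5.0, 5.2, 4.9, 10.0, 9.8, 10.1])
cancpt_toy = np.array([0, 2, 5, len(y_toy) - 1])   # hypothetical candidates (inclusive ends)

def seg_cost(a, b):
    seg = y_toy[a:b + 1]
    return float(np.sum((seg - seg.mean()) ** 2))   # within-segment squared error

npt = len(cancpt_toy)
INF = float('inf')
best = [[INF] * npt for _ in range(npt)]   # best[k][j]: cost of k segments ending at cancpt_toy[j]
for j in range(1, npt):
    best[1][j] = seg_cost(0, cancpt_toy[j])
for k in range(2, npt):
    for j in range(k, npt):
        best[k][j] = min(best[k - 1][i] + seg_cost(cancpt_toy[i] + 1, cancpt_toy[j])
                         for i in range(k - 1, j))

U_toy = [best[k][npt - 1] for k in range(1, npt)]   # unpenalised cost for 1, 2, 3 segments
print([round(u, 3) for u in U_toy])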
random_line_split
lib.rs
use std::cell::{Cell, RefCell}; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; /// `InputCellID` is a unique identifier for an input cell. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct InputCellID(usize); /// `ComputeCellID` is a unique identifier for a compute cell. /// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable, /// demonstrated by the following tests: /// /// ```compile_fail /// let mut r = react::Reactor::new(); /// let input: react::ComputeCellID = r.create_input(111); /// ``` /// /// ```compile_fail /// let mut r = react::Reactor::new(); /// let input = r.create_input(111); /// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap(); /// ``` #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct ComputeCellID(usize); #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct CallbackID(usize); pub type Callback<'reactor, T> = RefCell<Box<dyn 'reactor + FnMut(T)>>; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum CellID { Input(InputCellID), Compute(ComputeCellID), } #[derive(Debug, PartialEq)] pub enum RemoveCallbackError { NonexistentCell, NonexistentCallback, } struct InputCell<T> { clients: HashSet<ComputeCellID>, value: T, } impl<T: Copy + Debug + PartialEq> InputCell<T> { pub fn new(init: T) -> Self { InputCell { clients: HashSet::new(), value: init, } } } struct ComputeCell<'r, T: Debug> { fun: Box<dyn 'r + Fn(&[T]) -> T>, deps: Vec<CellID>, callbacks: HashMap<CallbackID, Callback<'r, T>>, prev_val: Cell<Option<T>>, next_cbid: usize, // increases monotonically; increments on adding a callback clients: HashSet<ComputeCellID>, } impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> { pub fn new<F>(fun: F, deps: &[CellID]) -> Self where F: 'r + Fn(&[T]) -> T, { ComputeCell { fun: Box::new(fun), deps: deps.to_vec(), callbacks: HashMap::new(), prev_val: Cell::new(None), next_cbid: 0, clients: HashSet::new(), } } pub fn call(&self, reactor: &Reactor<'r, T>) -> T { let deps = self .deps .iter() .map(|c| reactor.value(*c).unwrap()) .collect::<Vec<T>>(); let nv = (self.fun)(&deps); let mut fire_callbacks = false; if let Some(pv) = self.prev_val.get() { if nv != pv { self.prev_val.set(Some(nv)); fire_callbacks = true; } } else { self.prev_val.set(Some(nv)); fire_callbacks = true; } if fire_callbacks { for c in self.callbacks.values() { (&mut *c.borrow_mut())(nv); } } nv } } #[derive(Default)] pub struct Reactor<'r, T: Debug> { input_cells: Vec<InputCell<T>>, compute_cells: Vec<ComputeCell<'r, T>>, } // You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq. impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> { pub fn new() -> Self { Reactor { input_cells: Vec::new(), compute_cells: Vec::new(), } } // Creates an input cell with the specified initial value, returning its ID. pub fn create_input(&mut self, initial: T) -> InputCellID { let idx = self.input_cells.len(); let id = InputCellID(idx); self.input_cells.push(InputCell::new(initial)); id } // Creates a compute cell with the specified dependencies and compute function. // The compute function is expected to take in its arguments in the same order as specified in // `dependencies`. // You do not need to reject compute functions that expect more arguments than there are // dependencies (how would you check for this, anyway?). // // If any dependency doesn't exist, returns an Err with that nonexistent dependency. 
// (If multiple dependencies do not exist, exactly which one is returned is not defined and // will not be tested) // // Notice that there is no way to *remove* a cell. // This means that you may assume, without checking, that if the dependencies exist at creation // time they will continue to exist as long as the Reactor exists. pub fn create_compute<F>( &mut self, dependencies: &[CellID], compute_func: F, ) -> Result<ComputeCellID, CellID> where F: 'r + Fn(&[T]) -> T, { let cidx = self.compute_cells.len(); let cid = ComputeCellID(cidx); for id in dependencies.iter() { match id { CellID::Input(InputCellID(idx)) => { if *idx >= self.input_cells.len() { return Err(*id); } } CellID::Compute(ComputeCellID(idx)) => { if *idx >= self.compute_cells.len() { return Err(*id); } } } } // register as clients with all dependencies. for id in dependencies.iter() { match id { CellID::Input(InputCellID(idx)) => { let _ = self.input_cells[*idx].clients.insert(cid); } CellID::Compute(ComputeCellID(idx)) => { let _ = self.compute_cells[*idx].clients.insert(cid); } } } let cell = ComputeCell::new(compute_func, dependencies); cell.call(&self); // set the initial value self.compute_cells.push(cell); Ok(cid) } // Retrieves the current value of the cell, or None if the cell does not exist. // // You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>` // and have a `value(&self)` method on `Cell`. // // It turns out this introduces a significant amount of extra complexity to this exercise. // We chose not to cover this here, since this exercise is probably enough work as-is. pub fn value(&self, id: CellID) -> Option<T> { match id { CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value), CellID::Compute(ComputeCellID(idx)) => { if let Some(cell) = self.compute_cells.get(idx) { Some(cell.call(&self)) } else { None } }
} // Sets the value of the specified input cell. // // Returns false if the cell does not exist. // // Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with // a `set_value(&mut self, new_value: T)` method on `Cell`. // // As before, that turned out to add too much extra complexity. pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool { let InputCellID(idx) = id; if idx < self.input_cells.len() { let old_value = self.input_cells[idx].value; if old_value == new_value { return true; } self.input_cells[idx].value = new_value; let mut clients1 = self.input_cells[idx].clients.clone(); let mut clients2 = HashSet::new(); let mut done = false; // Recursively iterate through all clients until we've converged on the // the stable set of them. Does at least N extra checks, where N is // the numer of ultimate clients. while !done { for client in clients1.iter() { clients2.insert(client.clone()); let ComputeCellID(idx) = client; let cell = &self.compute_cells[*idx]; // first find all the clients that will be called without us clients2.extend(cell.clients.iter()); } for client in clients2.iter() { let ComputeCellID(idx) = client; let cell = &self.compute_cells[*idx]; clients1.extend(cell.clients.iter()); } done = clients1 == clients2; } // This has the potential to call more clients than needed, but ComputeCells // cache their previous value and only invoke their callbacks on change, // so client callbacks won't get invoked more than once. // // There's an implicit assumption here that each ComputeCell's function is // cheap to run, which is probably not true in general. We could do a // topological sort of the client graph to ensure we only call leaf nodes. for client in clients1 { let ComputeCellID(idx) = client; let cell = &self.compute_cells[idx]; cell.call(&self); } // we have set a new value and called all clients, return true true } else { // the new value was the same as the old value, return false false } } // Adds a callback to the specified compute cell. // // Returns the ID of the just-added callback, or None if the cell doesn't exist. // // Callbacks on input cells will not be tested. // // The semantics of callbacks (as will be tested): // For a single set_value call, each compute cell's callbacks should each be called: // * Zero times if the compute cell's value did not change as a result of the set_value call. // * Exactly once if the compute cell's value changed as a result of the set_value call. // The value passed to the callback should be the final value of the compute cell after the // set_value call. pub fn add_callback<F: 'r + FnMut(T) -> ()>( &mut self, id: ComputeCellID, callback: F, ) -> Option<CallbackID> { let ComputeCellID(idx) = id; if idx >= self.compute_cells.len() { return None; } let cidx = self.compute_cells[idx].next_cbid.to_owned(); self.compute_cells[idx].next_cbid += 1; let cid = CallbackID(cidx); self.compute_cells[idx] .callbacks .insert(cid, RefCell::new(Box::new(callback))); Some(cid) } // Removes the specified callback, using an ID returned from add_callback. // // Returns an Err if either the cell or callback does not exist. // // A removed callback should no longer be called. 
pub fn remove_callback( &mut self, cell: ComputeCellID, callback: CallbackID, ) -> Result<(), RemoveCallbackError> { let ComputeCellID(idx) = cell; if let Some(compute_cell) = self.compute_cells.get_mut(idx) { if compute_cell.callbacks.remove(&callback).is_some() { return Ok(()); } else { return Err(RemoveCallbackError::NonexistentCallback); } } else { Err(RemoveCallbackError::NonexistentCell) } } }
}
random_line_split
lib.rs
use std::cell::{Cell, RefCell}; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; /// `InputCellID` is a unique identifier for an input cell. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct InputCellID(usize); /// `ComputeCellID` is a unique identifier for a compute cell. /// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable, /// demonstrated by the following tests: /// /// ```compile_fail /// let mut r = react::Reactor::new(); /// let input: react::ComputeCellID = r.create_input(111); /// ``` /// /// ```compile_fail /// let mut r = react::Reactor::new(); /// let input = r.create_input(111); /// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap(); /// ``` #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct ComputeCellID(usize); #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct CallbackID(usize); pub type Callback<'reactor, T> = RefCell<Box<dyn 'reactor + FnMut(T)>>; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum CellID { Input(InputCellID), Compute(ComputeCellID), } #[derive(Debug, PartialEq)] pub enum RemoveCallbackError { NonexistentCell, NonexistentCallback, } struct InputCell<T> { clients: HashSet<ComputeCellID>, value: T, } impl<T: Copy + Debug + PartialEq> InputCell<T> { pub fn new(init: T) -> Self
} struct ComputeCell<'r, T: Debug> { fun: Box<dyn 'r + Fn(&[T]) -> T>, deps: Vec<CellID>, callbacks: HashMap<CallbackID, Callback<'r, T>>, prev_val: Cell<Option<T>>, next_cbid: usize, // increases monotonically; increments on adding a callback clients: HashSet<ComputeCellID>, } impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> { pub fn new<F>(fun: F, deps: &[CellID]) -> Self where F: 'r + Fn(&[T]) -> T, { ComputeCell { fun: Box::new(fun), deps: deps.to_vec(), callbacks: HashMap::new(), prev_val: Cell::new(None), next_cbid: 0, clients: HashSet::new(), } } pub fn call(&self, reactor: &Reactor<'r, T>) -> T { let deps = self .deps .iter() .map(|c| reactor.value(*c).unwrap()) .collect::<Vec<T>>(); let nv = (self.fun)(&deps); let mut fire_callbacks = false; if let Some(pv) = self.prev_val.get() { if nv != pv { self.prev_val.set(Some(nv)); fire_callbacks = true; } } else { self.prev_val.set(Some(nv)); fire_callbacks = true; } if fire_callbacks { for c in self.callbacks.values() { (&mut *c.borrow_mut())(nv); } } nv } } #[derive(Default)] pub struct Reactor<'r, T: Debug> { input_cells: Vec<InputCell<T>>, compute_cells: Vec<ComputeCell<'r, T>>, } // You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq. impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> { pub fn new() -> Self { Reactor { input_cells: Vec::new(), compute_cells: Vec::new(), } } // Creates an input cell with the specified initial value, returning its ID. pub fn create_input(&mut self, initial: T) -> InputCellID { let idx = self.input_cells.len(); let id = InputCellID(idx); self.input_cells.push(InputCell::new(initial)); id } // Creates a compute cell with the specified dependencies and compute function. // The compute function is expected to take in its arguments in the same order as specified in // `dependencies`. // You do not need to reject compute functions that expect more arguments than there are // dependencies (how would you check for this, anyway?). // // If any dependency doesn't exist, returns an Err with that nonexistent dependency. // (If multiple dependencies do not exist, exactly which one is returned is not defined and // will not be tested) // // Notice that there is no way to *remove* a cell. // This means that you may assume, without checking, that if the dependencies exist at creation // time they will continue to exist as long as the Reactor exists. pub fn create_compute<F>( &mut self, dependencies: &[CellID], compute_func: F, ) -> Result<ComputeCellID, CellID> where F: 'r + Fn(&[T]) -> T, { let cidx = self.compute_cells.len(); let cid = ComputeCellID(cidx); for id in dependencies.iter() { match id { CellID::Input(InputCellID(idx)) => { if *idx >= self.input_cells.len() { return Err(*id); } } CellID::Compute(ComputeCellID(idx)) => { if *idx >= self.compute_cells.len() { return Err(*id); } } } } // register as clients with all dependencies. for id in dependencies.iter() { match id { CellID::Input(InputCellID(idx)) => { let _ = self.input_cells[*idx].clients.insert(cid); } CellID::Compute(ComputeCellID(idx)) => { let _ = self.compute_cells[*idx].clients.insert(cid); } } } let cell = ComputeCell::new(compute_func, dependencies); cell.call(&self); // set the initial value self.compute_cells.push(cell); Ok(cid) } // Retrieves the current value of the cell, or None if the cell does not exist. // // You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>` // and have a `value(&self)` method on `Cell`. 
// // It turns out this introduces a significant amount of extra complexity to this exercise. // We chose not to cover this here, since this exercise is probably enough work as-is. pub fn value(&self, id: CellID) -> Option<T> { match id { CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value), CellID::Compute(ComputeCellID(idx)) => { if let Some(cell) = self.compute_cells.get(idx) { Some(cell.call(&self)) } else { None } } } } // Sets the value of the specified input cell. // // Returns false if the cell does not exist. // // Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with // a `set_value(&mut self, new_value: T)` method on `Cell`. // // As before, that turned out to add too much extra complexity. pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool { let InputCellID(idx) = id; if idx < self.input_cells.len() { let old_value = self.input_cells[idx].value; if old_value == new_value { return true; } self.input_cells[idx].value = new_value; let mut clients1 = self.input_cells[idx].clients.clone(); let mut clients2 = HashSet::new(); let mut done = false; // Recursively iterate through all clients until we've converged on the // the stable set of them. Does at least N extra checks, where N is // the numer of ultimate clients. while !done { for client in clients1.iter() { clients2.insert(client.clone()); let ComputeCellID(idx) = client; let cell = &self.compute_cells[*idx]; // first find all the clients that will be called without us clients2.extend(cell.clients.iter()); } for client in clients2.iter() { let ComputeCellID(idx) = client; let cell = &self.compute_cells[*idx]; clients1.extend(cell.clients.iter()); } done = clients1 == clients2; } // This has the potential to call more clients than needed, but ComputeCells // cache their previous value and only invoke their callbacks on change, // so client callbacks won't get invoked more than once. // // There's an implicit assumption here that each ComputeCell's function is // cheap to run, which is probably not true in general. We could do a // topological sort of the client graph to ensure we only call leaf nodes. for client in clients1 { let ComputeCellID(idx) = client; let cell = &self.compute_cells[idx]; cell.call(&self); } // we have set a new value and called all clients, return true true } else { // the new value was the same as the old value, return false false } } // Adds a callback to the specified compute cell. // // Returns the ID of the just-added callback, or None if the cell doesn't exist. // // Callbacks on input cells will not be tested. // // The semantics of callbacks (as will be tested): // For a single set_value call, each compute cell's callbacks should each be called: // * Zero times if the compute cell's value did not change as a result of the set_value call. // * Exactly once if the compute cell's value changed as a result of the set_value call. // The value passed to the callback should be the final value of the compute cell after the // set_value call. pub fn add_callback<F: 'r + FnMut(T) -> ()>( &mut self, id: ComputeCellID, callback: F, ) -> Option<CallbackID> { let ComputeCellID(idx) = id; if idx >= self.compute_cells.len() { return None; } let cidx = self.compute_cells[idx].next_cbid.to_owned(); self.compute_cells[idx].next_cbid += 1; let cid = CallbackID(cidx); self.compute_cells[idx] .callbacks .insert(cid, RefCell::new(Box::new(callback))); Some(cid) } // Removes the specified callback, using an ID returned from add_callback. 
// // Returns an Err if either the cell or callback does not exist. // // A removed callback should no longer be called. pub fn remove_callback( &mut self, cell: ComputeCellID, callback: CallbackID, ) -> Result<(), RemoveCallbackError> { let ComputeCellID(idx) = cell; if let Some(compute_cell) = self.compute_cells.get_mut(idx) { if compute_cell.callbacks.remove(&callback).is_some() { return Ok(()); } else { return Err(RemoveCallbackError::NonexistentCallback); } } else { Err(RemoveCallbackError::NonexistentCell) } } }
{ InputCell { clients: HashSet::new(), value: init, } }
identifier_body
lib.rs
use std::cell::{Cell, RefCell}; use std::collections::{HashMap, HashSet}; use std::fmt::Debug; /// `InputCellID` is a unique identifier for an input cell. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct InputCellID(usize); /// `ComputeCellID` is a unique identifier for a compute cell. /// Values of type `InputCellID` and `ComputeCellID` should not be mutually assignable, /// demonstrated by the following tests: /// /// ```compile_fail /// let mut r = react::Reactor::new(); /// let input: react::ComputeCellID = r.create_input(111); /// ``` /// /// ```compile_fail /// let mut r = react::Reactor::new(); /// let input = r.create_input(111); /// let compute: react::InputCellID = r.create_compute(&[react::CellID::Input(input)], |_| 222).unwrap(); /// ``` #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct ComputeCellID(usize); #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct CallbackID(usize); pub type Callback<'reactor, T> = RefCell<Box<dyn 'reactor + FnMut(T)>>; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum CellID { Input(InputCellID), Compute(ComputeCellID), } #[derive(Debug, PartialEq)] pub enum RemoveCallbackError { NonexistentCell, NonexistentCallback, } struct InputCell<T> { clients: HashSet<ComputeCellID>, value: T, } impl<T: Copy + Debug + PartialEq> InputCell<T> { pub fn new(init: T) -> Self { InputCell { clients: HashSet::new(), value: init, } } } struct ComputeCell<'r, T: Debug> { fun: Box<dyn 'r + Fn(&[T]) -> T>, deps: Vec<CellID>, callbacks: HashMap<CallbackID, Callback<'r, T>>, prev_val: Cell<Option<T>>, next_cbid: usize, // increases monotonically; increments on adding a callback clients: HashSet<ComputeCellID>, } impl<'r, T: Copy + Debug + PartialEq + 'r> ComputeCell<'r, T> { pub fn new<F>(fun: F, deps: &[CellID]) -> Self where F: 'r + Fn(&[T]) -> T, { ComputeCell { fun: Box::new(fun), deps: deps.to_vec(), callbacks: HashMap::new(), prev_val: Cell::new(None), next_cbid: 0, clients: HashSet::new(), } } pub fn call(&self, reactor: &Reactor<'r, T>) -> T { let deps = self .deps .iter() .map(|c| reactor.value(*c).unwrap()) .collect::<Vec<T>>(); let nv = (self.fun)(&deps); let mut fire_callbacks = false; if let Some(pv) = self.prev_val.get() { if nv != pv { self.prev_val.set(Some(nv)); fire_callbacks = true; } } else { self.prev_val.set(Some(nv)); fire_callbacks = true; } if fire_callbacks { for c in self.callbacks.values() { (&mut *c.borrow_mut())(nv); } } nv } } #[derive(Default)] pub struct Reactor<'r, T: Debug> { input_cells: Vec<InputCell<T>>, compute_cells: Vec<ComputeCell<'r, T>>, } // You are guaranteed that Reactor will only be tested against types that are Copy + PartialEq. impl<'r, T: Copy + Debug + PartialEq + 'r> Reactor<'r, T> { pub fn
() -> Self { Reactor { input_cells: Vec::new(), compute_cells: Vec::new(), } } // Creates an input cell with the specified initial value, returning its ID. pub fn create_input(&mut self, initial: T) -> InputCellID { let idx = self.input_cells.len(); let id = InputCellID(idx); self.input_cells.push(InputCell::new(initial)); id } // Creates a compute cell with the specified dependencies and compute function. // The compute function is expected to take in its arguments in the same order as specified in // `dependencies`. // You do not need to reject compute functions that expect more arguments than there are // dependencies (how would you check for this, anyway?). // // If any dependency doesn't exist, returns an Err with that nonexistent dependency. // (If multiple dependencies do not exist, exactly which one is returned is not defined and // will not be tested) // // Notice that there is no way to *remove* a cell. // This means that you may assume, without checking, that if the dependencies exist at creation // time they will continue to exist as long as the Reactor exists. pub fn create_compute<F>( &mut self, dependencies: &[CellID], compute_func: F, ) -> Result<ComputeCellID, CellID> where F: 'r + Fn(&[T]) -> T, { let cidx = self.compute_cells.len(); let cid = ComputeCellID(cidx); for id in dependencies.iter() { match id { CellID::Input(InputCellID(idx)) => { if *idx >= self.input_cells.len() { return Err(*id); } } CellID::Compute(ComputeCellID(idx)) => { if *idx >= self.compute_cells.len() { return Err(*id); } } } } // register as clients with all dependencies. for id in dependencies.iter() { match id { CellID::Input(InputCellID(idx)) => { let _ = self.input_cells[*idx].clients.insert(cid); } CellID::Compute(ComputeCellID(idx)) => { let _ = self.compute_cells[*idx].clients.insert(cid); } } } let cell = ComputeCell::new(compute_func, dependencies); cell.call(&self); // set the initial value self.compute_cells.push(cell); Ok(cid) } // Retrieves the current value of the cell, or None if the cell does not exist. // // You may wonder whether it is possible to implement `get(&self, id: CellID) -> Option<&Cell>` // and have a `value(&self)` method on `Cell`. // // It turns out this introduces a significant amount of extra complexity to this exercise. // We chose not to cover this here, since this exercise is probably enough work as-is. pub fn value(&self, id: CellID) -> Option<T> { match id { CellID::Input(InputCellID(idx)) => self.input_cells.get(idx).map(|i| i.value), CellID::Compute(ComputeCellID(idx)) => { if let Some(cell) = self.compute_cells.get(idx) { Some(cell.call(&self)) } else { None } } } } // Sets the value of the specified input cell. // // Returns false if the cell does not exist. // // Similarly, you may wonder about `get_mut(&mut self, id: CellID) -> Option<&mut Cell>`, with // a `set_value(&mut self, new_value: T)` method on `Cell`. // // As before, that turned out to add too much extra complexity. pub fn set_value(&mut self, id: InputCellID, new_value: T) -> bool { let InputCellID(idx) = id; if idx < self.input_cells.len() { let old_value = self.input_cells[idx].value; if old_value == new_value { return true; } self.input_cells[idx].value = new_value; let mut clients1 = self.input_cells[idx].clients.clone(); let mut clients2 = HashSet::new(); let mut done = false; // Recursively iterate through all clients until we've converged on the // the stable set of them. Does at least N extra checks, where N is // the numer of ultimate clients. 
while !done { for client in clients1.iter() { clients2.insert(client.clone()); let ComputeCellID(idx) = client; let cell = &self.compute_cells[*idx]; // first find all the clients that will be called without us clients2.extend(cell.clients.iter()); } for client in clients2.iter() { let ComputeCellID(idx) = client; let cell = &self.compute_cells[*idx]; clients1.extend(cell.clients.iter()); } done = clients1 == clients2; } // This has the potential to call more clients than needed, but ComputeCells // cache their previous value and only invoke their callbacks on change, // so client callbacks won't get invoked more than once. // // There's an implicit assumption here that each ComputeCell's function is // cheap to run, which is probably not true in general. We could do a // topological sort of the client graph to ensure we only call leaf nodes. for client in clients1 { let ComputeCellID(idx) = client; let cell = &self.compute_cells[idx]; cell.call(&self); } // we have set a new value and called all clients, return true true } else { // the new value was the same as the old value, return false false } } // Adds a callback to the specified compute cell. // // Returns the ID of the just-added callback, or None if the cell doesn't exist. // // Callbacks on input cells will not be tested. // // The semantics of callbacks (as will be tested): // For a single set_value call, each compute cell's callbacks should each be called: // * Zero times if the compute cell's value did not change as a result of the set_value call. // * Exactly once if the compute cell's value changed as a result of the set_value call. // The value passed to the callback should be the final value of the compute cell after the // set_value call. pub fn add_callback<F: 'r + FnMut(T) -> ()>( &mut self, id: ComputeCellID, callback: F, ) -> Option<CallbackID> { let ComputeCellID(idx) = id; if idx >= self.compute_cells.len() { return None; } let cidx = self.compute_cells[idx].next_cbid.to_owned(); self.compute_cells[idx].next_cbid += 1; let cid = CallbackID(cidx); self.compute_cells[idx] .callbacks .insert(cid, RefCell::new(Box::new(callback))); Some(cid) } // Removes the specified callback, using an ID returned from add_callback. // // Returns an Err if either the cell or callback does not exist. // // A removed callback should no longer be called. pub fn remove_callback( &mut self, cell: ComputeCellID, callback: CallbackID, ) -> Result<(), RemoveCallbackError> { let ComputeCellID(idx) = cell; if let Some(compute_cell) = self.compute_cells.get_mut(idx) { if compute_cell.callbacks.remove(&callback).is_some() { return Ok(()); } else { return Err(RemoveCallbackError::NonexistentCallback); } } else { Err(RemoveCallbackError::NonexistentCell) } } }
new
identifier_name
utils.py
import os from typing import List import numpy from aubio import notes, source, pitch, tempo from midiutil import MIDIFile from midiutil.MidiFile import TICKSPERQUARTERNOTE, NoteOn from pydub import AudioSegment from model.channel import channel_map, CHANNEL_NAME_DRUM_KIT from model.note import Note, drum_map def create_midi_file(num_tracks: int, file_format: int): return MIDIFile(numTracks=num_tracks, file_format=file_format) def save_midi_file(filename: str, midi_file: MIDIFile): with open(filename, "wb") as output_file: midi_file.writeFile(output_file) print("====> file %s saved." % filename) DEFAULT_SAMPLE_RATE = 44100 DOWN_SAMPLE = 1 def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read notes from a sound wave file with a list of dict of start_time, pitch and duration """ print("====> reading notes from sound file") win_s = 512 // DOWN_SAMPLE # fft size hop_s = 256 // DOWN_SAMPLE # hop size # adjust sample rate s = source(filename, samplerate, hop_s) samplerate = s.samplerate notes_o = notes("default", win_s, hop_s, samplerate) result = [] total_frames = 0 while True: samples, read = s() new_note = notes_o(samples) # note too high considered as noise if new_note[0] != 0 and new_note[0] <= 120: note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20, duration=new_note[2]) result.append(note_klass) total_frames += read if read < hop_s: break return result def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): print("====> reading bpm from sound file") win_s, hop_s = 1024, 512 s = source(filename, samplerate, hop_s) samplerate = s.samplerate ''' phase Phase based onset detection function This function uses information both in frequency and in phase to determine changes in the spectral content that might correspond to musical onsets. It is best suited for complex signals such as polyphonic recordings. Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note onset detection for music signals. In Proceedings of the IEEE International Conference on Acoustics Speech and Signal Processing, pages 441­444, Hong-Kong, 2003. ''' o = tempo("phase", win_s, hop_s, samplerate) beats = [] total_frames = 0 while True: samples, read = s() is_beat = o(samples) if is_beat: this_beat = o.get_last_s() beats.append(this_beat) # if o.get_confidence() > .2 and len(beats) > 2.: # break total_frames += read if read < hop_s: break def beats_to_bpm(beats, path): # if enough beats are found, convert to periods then to bpm if len(beats) > 1: if len(beats) < 4: print("few beats found in {:s}".format(path)) bpms = 60. 
/ numpy.diff(beats) return numpy.median(bpms) else: print("not enough beats found in {:s}".format(path)) return 0 return beats_to_bpm(beats, filename) def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read pitches from a sound wave file with a list of dict of pitch and confidence """ if os.path.isfile(filename) is False: raise Exception('File not found with filename = %s' % filename) print("====> reading pitch from sound file") win_s = 4096 // DOWN_SAMPLE # fft size hop_s = 512 // DOWN_SAMPLE # hop size s = source(filename, samplerate, hop_s) samplerate = s.samplerate tolerance = 0.8 pitch_o = pitch("yin", win_s, hop_s, samplerate) pitch_o.set_unit("midi") pitch_o.set_tolerance(tolerance) result = [] # total number of frames read total_frames = 0 while True: samples, read = s() # the pitch value is not rounded and many zeroes occur that_pitch = pitch_o(samples)[0] confidence = pitch_o.get_confidence() result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence)) total_frames += read if read < hop_s: break group_result_with_log_density = compute_density_from_pitch_result(result) density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time']) print("====> density level list length %s" % len(density_level_list)) proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time']) print("====> emphasis proportion list length = %d" % len(proportion_list)) return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list) def compute_density_level(group_result_with_log_density: List[dict], length: float): """ following result of function compute_density_from_pitch_result, this method will compute for each group, a readable (from 0 to 9) density value for further usage :param group_result_with_log_density: :param length end time :return: """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_val = max(log_density_list) min_val = min(log_density_list) # split range with 10 and compute which to where range_val = max_val - min_val total_level = 9 gap = range_val / total_level level_list = [] for i, log_density in enumerate(log_density_list): level = 5 if gap != 0: level = round((log_density - min_val) / gap) level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time'])) for level_dict in level_list: start = level_dict['start_time'] / length level_dict['start_time'] = start return level_list def compute_density_from_pitch_result(pitch_result: List[dict]): group_result = [] group = [] for i, pitch_dict in enumerate(pitch_result): # current is not zero, but previous is zero # should flush the group if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0: group_result.append(group) group = [] group.append(pitch_dict) # now for each group we have the elements which are essentially divided by time frame # we just need to identify the average density and get the highest ones density_list = [len(group) for group in group_result] # average_density = sum(density_list) / len(density_list) log_density_list = numpy.log10(density_list) # only for those group with density > coefficient * log_max_density is qualified to be the emphasis one. 
# but here we just give the density log result group_result_with_log_density = [] for i, group in enumerate(group_result): group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group)) return group_result_with_log_density def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8, threshold: int = 1): """ :param group_result_with_log_density compute_density_from_pitch_result function result :param coefficient compares to the max log value, which should we consider emphasis :param threshold means only pitch density more than threshold could use emphasis method :param length is the length of sound in second unit """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_log_density = max(log_density_list) filter_value = coefficient * max_log_density pitch_group_list = [] for group in group_result_with_log_density: if group['log_density'] >= threshold and group['log_density'] >= filter_value: pitch_group_list.append(group['pitches']) # now we have pitch group, we can know where to start emphasis and where to end range_time_list = [] for pitch_group in pitch_group_list: start = pitch_group[0]['time'] end = pitch_group[len(pitch_group) - 1]['time'] range_time_list.append(dict(start=start, end=end)) # transform proportion value for further beats computing proportion_list = [] for range_time in range_time_list: start = range_time['start'] / length
def drum_note_to_heart_beat_track(midi_instance: MIDIFile): """ @Deprecated """ # exporting bass drum notes bass_drum_beats_in_ms = [] ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE) for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList: if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']: bass_drum_beats_in_ms.append(ms_per_tick * event.tick) single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3') heartbeat_track = AudioSegment.empty() for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms): if i == 0: heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on) elif i + 1 < len(bass_drum_beats_in_ms): # if the next bass drum time is early than heartbeat track if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]: continue # fill the gap till the next heart beat gap = bass_drum_beats_in_ms[i + 1] - len(heartbeat_track) heartbeat_track += AudioSegment.silent(duration=gap) elif i == len(bass_drum_beats_in_ms) - 1: # ignore the last one continue heartbeat_track += single_heart_beat heartbeat_track.export('heartbeat_track.mp3', format='mp3') def get_one_bar_heart_beat(filename: str, bpm: int): """ given defined bpm, it generates a bar of heartbeat sound. given the fact that the heart beat track has a certain length of each beat, the bpm cannot be too high, which is undetermined yet. :return: """ heart_beat_track = AudioSegment.from_file(file=filename, format='mp3') heart_beat_1 = heart_beat_track[70:180] heart_beat_2 = heart_beat_track[380:490] # AudioSegment.export(part, 'single_heartbeat1.mp3') tick_per_sec = 60 * 1000 / bpm # make a sequential beats by a quarter notes which means a tick contains 2 heat beats # and this is only applied for a half bar. # in conclusion, one bar has two sets of heart beats result_track = AudioSegment.empty() # first set result_track += heart_beat_1 gap = tick_per_sec / 2 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # fill the gap gap = tick_per_sec * 2 - len(result_track) result_track += AudioSegment.silent(gap) # # second set result_track += heart_beat_1 gap = tick_per_sec * 2.5 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # # fill the end gap gap = tick_per_sec * 4 - len(result_track) result_track += AudioSegment.silent(gap) return result_track def get_heart_beat_track(filename: str, bar_count: int, bpm: int): result = AudioSegment.empty() for i in range(bar_count): result += get_one_bar_heart_beat(filename, bpm) return result def get_heart_beat_track_and_save(filename: str, dest_filename: str, bar_count: int, bpm: int): result = get_heart_beat_track(filename, bar_count, bpm) # reduce 3dB of the result result = result - 3 # tick_per_sec = 60 * 1000 / bpm # fade_time = round(tick_per_sec * 4) # result.fade_in(fade_time) # result.fade_out(fade_time) AudioSegment.export(result, dest_filename) REF_BPM = 90 BOTTOM_BPM = 50 def normalize_bpm(bpm: int): """ presume that general bpm of voice result is in range of 50 - 200 and we may want to center speed to 90 as reference. so the algorithm could be: gap = abs(90 - x) result = 50 + gap :param bpm: :return: """ return abs(REF_BPM - bpm) + BOTTOM_BPM
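# --- Illustrative aside (not part of the original module) -------------------
# What normalize_bpm does on a few sample tempos (the inputs are assumptions):
# a detected 60 maps to 80, 90 maps to 50, and 150 maps to 110.
for detected_bpm in (60, 90, 150):
    print(detected_bpm, '->', abs(90 - detected_bpm) + 50)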
end = range_time['end'] / length proportion_list.append(dict(start=start, end=end)) return proportion_list
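# --- Illustrative aside (not part of the original module) -------------------
# The proportion step above on made-up numbers: absolute emphasis ranges (in
# seconds) become fractions of the total length, ready for beat placement.
range_time_toy = [dict(start=1.5, end=2.25), dict(start=6.0, end=7.5)]
length_toy = 10.0

proportion_toy = [dict(start=r['start'] / length_toy, end=r['end'] / length_toy)
                  for r in range_time_toy]
print(proportion_toy)   # [{'start': 0.15, 'end': 0.225}, {'start': 0.6, 'end': 0.75}]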
random_line_split
utils.py
import os from typing import List import numpy from aubio import notes, source, pitch, tempo from midiutil import MIDIFile from midiutil.MidiFile import TICKSPERQUARTERNOTE, NoteOn from pydub import AudioSegment from model.channel import channel_map, CHANNEL_NAME_DRUM_KIT from model.note import Note, drum_map def create_midi_file(num_tracks: int, file_format: int): return MIDIFile(numTracks=num_tracks, file_format=file_format) def save_midi_file(filename: str, midi_file: MIDIFile): with open(filename, "wb") as output_file: midi_file.writeFile(output_file) print("====> file %s saved." % filename) DEFAULT_SAMPLE_RATE = 44100 DOWN_SAMPLE = 1 def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read notes from a sound wave file with a list of dict of start_time, pitch and duration """ print("====> reading notes from sound file") win_s = 512 // DOWN_SAMPLE # fft size hop_s = 256 // DOWN_SAMPLE # hop size # adjust sample rate s = source(filename, samplerate, hop_s) samplerate = s.samplerate notes_o = notes("default", win_s, hop_s, samplerate) result = [] total_frames = 0 while True: samples, read = s() new_note = notes_o(samples) # note too high considered as noise if new_note[0] != 0 and new_note[0] <= 120: note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20, duration=new_note[2]) result.append(note_klass) total_frames += read if read < hop_s: break return result def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): print("====> reading bpm from sound file") win_s, hop_s = 1024, 512 s = source(filename, samplerate, hop_s) samplerate = s.samplerate ''' phase Phase based onset detection function This function uses information both in frequency and in phase to determine changes in the spectral content that might correspond to musical onsets. It is best suited for complex signals such as polyphonic recordings. Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note onset detection for music signals. In Proceedings of the IEEE International Conference on Acoustics Speech and Signal Processing, pages 441­444, Hong-Kong, 2003. ''' o = tempo("phase", win_s, hop_s, samplerate) beats = [] total_frames = 0 while True: samples, read = s() is_beat = o(samples) if is_beat: this_beat = o.get_last_s() beats.append(this_beat) # if o.get_confidence() > .2 and len(beats) > 2.: # break total_frames += read if read < hop_s: break def beats_to_bpm(beats, path): # if enough beats are found, convert to periods then to bpm if len(beats) > 1: if len(beats) < 4: print("few beats found in {:s}".format(path)) bpms = 60. 
/ numpy.diff(beats) return numpy.median(bpms) else: print("not enough beats found in {:s}".format(path)) return 0 return beats_to_bpm(beats, filename) def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read pitches from a sound wave file with a list of dict of pitch and confidence """ if os.path.isfile(filename) is False: raise Exception('File not found with filename = %s' % filename) print("====> reading pitch from sound file") win_s = 4096 // DOWN_SAMPLE # fft size hop_s = 512 // DOWN_SAMPLE # hop size s = source(filename, samplerate, hop_s) samplerate = s.samplerate tolerance = 0.8 pitch_o = pitch("yin", win_s, hop_s, samplerate) pitch_o.set_unit("midi") pitch_o.set_tolerance(tolerance) result = [] # total number of frames read total_frames = 0 while True: samples, read = s() # the pitch value is not rounded and many zeroes occur that_pitch = pitch_o(samples)[0] confidence = pitch_o.get_confidence() result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence)) total_frames += read if read < hop_s: break group_result_with_log_density = compute_density_from_pitch_result(result) density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time']) print("====> density level list length %s" % len(density_level_list)) proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time']) print("====> emphasis proportion list length = %d" % len(proportion_list)) return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list) def compute_density_level(group_result_with_log_density: List[dict], length: float): """ following result of function compute_density_from_pitch_result, this method will compute for each group, a readable (from 0 to 9) density value for further usage :param group_result_with_log_density: :param length end time :return: """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_val = max(log_density_list) min_val = min(log_density_list) # split range with 10 and compute which to where range_val = max_val - min_val total_level = 9 gap = range_val / total_level level_list = [] for i, log_density in enumerate(log_density_list): level = 5 if gap != 0: level = round((log_density - min_val) / gap) level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time'])) for level_dict in level_list: start = level_dict['start_time'] / length level_dict['start_time'] = start return level_list def compute_density_from_pitch_result(pitch_result: List[dict]): group_result = [] group = [] for i, pitch_dict in enumerate(pitch_result): # current is not zero, but previous is zero # should flush the group if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0: group_result.append(group) group = [] group.append(pitch_dict) # now for each group we have the elements which are essentially divided by time frame # we just need to identify the average density and get the highest ones density_list = [len(group) for group in group_result] # average_density = sum(density_list) / len(density_list) log_density_list = numpy.log10(density_list) # only for those group with density > coefficient * log_max_density is qualified to be the emphasis one. 
# but here we just give the density log result group_result_with_log_density = [] for i, group in enumerate(group_result): group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group)) return group_result_with_log_density def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8, threshold: int = 1): """ :param group_result_with_log_density compute_density_from_pitch_result function result :param coefficient compares to the max log value, which should we consider emphasis :param threshold means only pitch density more than threshold could use emphasis method :param length is the length of sound in second unit """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_log_density = max(log_density_list) filter_value = coefficient * max_log_density pitch_group_list = [] for group in group_result_with_log_density: if group['log_density'] >= threshold and group['log_density'] >= filter_value: pitch_group_list.append(group['pitches']) # now we have pitch group, we can know where to start emphasis and where to end range_time_list = [] for pitch_group in pitch_group_list: start = pitch_group[0]['time'] end = pitch_group[len(pitch_group) - 1]['time'] range_time_list.append(dict(start=start, end=end)) # transform proportion value for further beats computing proportion_list = [] for range_time in range_time_list: start = range_time['start'] / length end = range_time['end'] / length proportion_list.append(dict(start=start, end=end)) return proportion_list def drum_note_to_heart_beat_track(midi_instance: MIDIFile): """ @Deprecated """ # exporting bass drum notes bass_drum_beats_in_ms = [] ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE) for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList: if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']: bass_drum_beats_in_ms.append(ms_per_tick * event.tick) single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3') heartbeat_track = AudioSegment.empty() for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms): if i == 0: heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on) elif i + 1 < len(bass_drum_beats_in_ms): # if the next bass drum time is early than heartbeat track if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]: continue # fill the gap till the next heart beat gap = bass_drum_beats_in_ms[i + 1] - len(heartbeat_track) heartbeat_track += AudioSegment.silent(duration=gap) elif i == len(bass_drum_beats_in_ms) - 1: # ignore the last one continue heartbeat_track += single_heart_beat heartbeat_track.export('heartbeat_track.mp3', format='mp3') def get_one_bar_heart_beat(filename: str, bpm: int): """ given defined bpm, it generates a bar of heartbeat sound. given the fact that the heart beat track has a certain length of each beat, the bpm cannot be too high, which is undetermined yet. :return: """ heart_beat_track = AudioSegment.from_file(file=filename, format='mp3') heart_beat_1 = heart_beat_track[70:180] heart_beat_2 = heart_beat_track[380:490] # AudioSegment.export(part, 'single_heartbeat1.mp3') tick_per_sec = 60 * 1000 / bpm # make a sequential beats by a quarter notes which means a tick contains 2 heat beats # and this is only applied for a half bar. 
# in conclusion, one bar has two sets of heart beats result_track = AudioSegment.empty() # first set result_track += heart_beat_1 gap = tick_per_sec / 2 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # fill the gap gap = tick_per_sec * 2 - len(result_track) result_track += AudioSegment.silent(gap) # # second set result_track += heart_beat_1 gap = tick_per_sec * 2.5 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # # fill the end gap gap = tick_per_sec * 4 - len(result_track) result_track += AudioSegment.silent(gap) return result_track def get_heart_beat_track(filename: str, bar_count: int, bpm: int): result = AudioSegment.empty() for i in range(bar_count): result += get_one_bar_heart_beat(filename, bpm) return result def get_heart_beat_track_and_save(filename: str, dest_filename: str, bar_count: int, bpm: int): r
REF_BPM = 90
BOTTOM_BPM = 50


def normalize_bpm(bpm: int):
    """
    Presume that the BPM detected from voice generally falls between 50 and 200,
    and that we want to centre the tempo around 90 as a reference. The mapping is:
        gap = abs(90 - x)
        result = 50 + gap
    :param bpm: detected tempo in beats per minute
    :return: normalized tempo in beats per minute
    """
    return abs(REF_BPM - bpm) + BOTTOM_BPM
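# --- Illustrative aside (not part of the original module) -------------------
# Timing arithmetic behind get_one_bar_heart_beat, on an assumed tempo: one
# quarter-note tick lasts 60000 / bpm milliseconds, each sliced heartbeat
# sample above is about 110 ms long, and a 4/4 bar spans four ticks.
bpm_toy = 80
tick_ms = 60 * 1000 / bpm_toy          # 750.0 ms per tick at 80 BPM
beat_ms = 110                          # ms, length of the sliced heartbeat samples
first_gap = tick_ms / 2 - beat_ms      # silence inserted after the first beat (265.0 ms)
bar_ms = tick_ms * 4                   # full bar length (3000.0 ms)
print(tick_ms, first_gap, bar_ms)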
esult = get_heart_beat_track(filename, bar_count, bpm) # reduce 3dB of the result result = result - 3 # tick_per_sec = 60 * 1000 / bpm # fade_time = round(tick_per_sec * 4) # result.fade_in(fade_time) # result.fade_out(fade_time) AudioSegment.export(result, dest_filename)
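# --- Illustrative aside (not part of the original module) -------------------
# A hedged usage sketch of the helpers above; the file names and parameters are
# placeholders, and pydub plus the source mp3 must be available for this to run.
if __name__ == '__main__':
    demo_bpm = normalize_bpm(140)      # a detected 140 BPM becomes 100 after normalisation
    get_heart_beat_track_and_save('single_heartbeat.mp3', 'heartbeat_track_demo.mp3',
                                  bar_count=8, bpm=demo_bpm)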
identifier_body
utils.py
import os from typing import List import numpy from aubio import notes, source, pitch, tempo from midiutil import MIDIFile from midiutil.MidiFile import TICKSPERQUARTERNOTE, NoteOn from pydub import AudioSegment from model.channel import channel_map, CHANNEL_NAME_DRUM_KIT from model.note import Note, drum_map def create_midi_file(num_tracks: int, file_format: int): return MIDIFile(numTracks=num_tracks, file_format=file_format) def save_midi_file(filename: str, midi_file: MIDIFile): with open(filename, "wb") as output_file: midi_file.writeFile(output_file) print("====> file %s saved." % filename) DEFAULT_SAMPLE_RATE = 44100 DOWN_SAMPLE = 1 def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read notes from a sound wave file with a list of dict of start_time, pitch and duration """ print("====> reading notes from sound file") win_s = 512 // DOWN_SAMPLE # fft size hop_s = 256 // DOWN_SAMPLE # hop size # adjust sample rate s = source(filename, samplerate, hop_s) samplerate = s.samplerate notes_o = notes("default", win_s, hop_s, samplerate) result = [] total_frames = 0 while True: samples, read = s() new_note = notes_o(samples) # note too high considered as noise if new_note[0] != 0 and new_note[0] <= 120: note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20, duration=new_note[2]) result.append(note_klass) total_frames += read if read < hop_s: break return result def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): print("====> reading bpm from sound file") win_s, hop_s = 1024, 512 s = source(filename, samplerate, hop_s) samplerate = s.samplerate ''' phase Phase based onset detection function This function uses information both in frequency and in phase to determine changes in the spectral content that might correspond to musical onsets. It is best suited for complex signals such as polyphonic recordings. Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note onset detection for music signals. In Proceedings of the IEEE International Conference on Acoustics Speech and Signal Processing, pages 441­444, Hong-Kong, 2003. ''' o = tempo("phase", win_s, hop_s, samplerate) beats = [] total_frames = 0 while True: samples, read = s() is_beat = o(samples) if is_beat: this_beat = o.get_last_s() beats.append(this_beat) # if o.get_confidence() > .2 and len(beats) > 2.: # break total_frames += read if read < hop_s: break def beats_to_bpm(beats, path): # if enough beats are found, convert to periods then to bpm if len(beats) > 1: if len(beats) < 4: print("few beats found in {:s}".format(path)) bpms = 60. 
/ numpy.diff(beats) return numpy.median(bpms) else: print("not enough beats found in {:s}".format(path)) return 0 return beats_to_bpm(beats, filename) def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read pitches from a sound wave file with a list of dict of pitch and confidence """ if os.path.isfile(filename) is False: raise Exception('File not found with filename = %s' % filename) print("====> reading pitch from sound file") win_s = 4096 // DOWN_SAMPLE # fft size hop_s = 512 // DOWN_SAMPLE # hop size s = source(filename, samplerate, hop_s) samplerate = s.samplerate tolerance = 0.8 pitch_o = pitch("yin", win_s, hop_s, samplerate) pitch_o.set_unit("midi") pitch_o.set_tolerance(tolerance) result = [] # total number of frames read total_frames = 0 while True: samples, read = s() # the pitch value is not rounded and many zeroes occur that_pitch = pitch_o(samples)[0] confidence = pitch_o.get_confidence() result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence)) total_frames += read if read < hop_s: break group_result_with_log_density = compute_density_from_pitch_result(result) density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time']) print("====> density level list length %s" % len(density_level_list)) proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time']) print("====> emphasis proportion list length = %d" % len(proportion_list)) return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list) def compute_density_level(group_result_with_log_density: List[dict], length: float): """ following result of function compute_density_from_pitch_result, this method will compute for each group, a readable (from 0 to 9) density value for further usage :param group_result_with_log_density: :param length end time :return: """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_val = max(log_density_list) min_val = min(log_density_list) # split range with 10 and compute which to where range_val = max_val - min_val total_level = 9 gap = range_val / total_level level_list = [] for i, log_density in enumerate(log_density_list): level = 5 if gap != 0: level = round((log_density - min_val) / gap) level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time'])) for level_dict in level_list: start = level_dict['start_time'] / length level_dict['start_time'] = start return level_list def c
pitch_result: List[dict]): group_result = [] group = [] for i, pitch_dict in enumerate(pitch_result): # current is not zero, but previous is zero # should flush the group if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0: group_result.append(group) group = [] group.append(pitch_dict) # now for each group we have the elements which are essentially divided by time frame # we just need to identify the average density and get the highest ones density_list = [len(group) for group in group_result] # average_density = sum(density_list) / len(density_list) log_density_list = numpy.log10(density_list) # only for those group with density > coefficient * log_max_density is qualified to be the emphasis one. # but here we just give the density log result group_result_with_log_density = [] for i, group in enumerate(group_result): group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group)) return group_result_with_log_density def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8, threshold: int = 1): """ :param group_result_with_log_density compute_density_from_pitch_result function result :param coefficient compares to the max log value, which should we consider emphasis :param threshold means only pitch density more than threshold could use emphasis method :param length is the length of sound in second unit """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_log_density = max(log_density_list) filter_value = coefficient * max_log_density pitch_group_list = [] for group in group_result_with_log_density: if group['log_density'] >= threshold and group['log_density'] >= filter_value: pitch_group_list.append(group['pitches']) # now we have pitch group, we can know where to start emphasis and where to end range_time_list = [] for pitch_group in pitch_group_list: start = pitch_group[0]['time'] end = pitch_group[len(pitch_group) - 1]['time'] range_time_list.append(dict(start=start, end=end)) # transform proportion value for further beats computing proportion_list = [] for range_time in range_time_list: start = range_time['start'] / length end = range_time['end'] / length proportion_list.append(dict(start=start, end=end)) return proportion_list def drum_note_to_heart_beat_track(midi_instance: MIDIFile): """ @Deprecated """ # exporting bass drum notes bass_drum_beats_in_ms = [] ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE) for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList: if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']: bass_drum_beats_in_ms.append(ms_per_tick * event.tick) single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3') heartbeat_track = AudioSegment.empty() for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms): if i == 0: heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on) elif i + 1 < len(bass_drum_beats_in_ms): # if the next bass drum time is early than heartbeat track if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]: continue # fill the gap till the next heart beat gap = bass_drum_beats_in_ms[i + 1] - len(heartbeat_track) heartbeat_track += AudioSegment.silent(duration=gap) elif i == len(bass_drum_beats_in_ms) - 1: # ignore the last one continue heartbeat_track += single_heart_beat heartbeat_track.export('heartbeat_track.mp3', format='mp3') def get_one_bar_heart_beat(filename: str, bpm: int): """ given 
a defined bpm, this generates one bar of heartbeat sound. Since each beat in the heartbeat track has a fixed length, the bpm cannot be too high; the exact upper limit is not determined yet. :return: """ heart_beat_track = AudioSegment.from_file(file=filename, format='mp3') heart_beat_1 = heart_beat_track[70:180] heart_beat_2 = heart_beat_track[380:490] # AudioSegment.export(part, 'single_heartbeat1.mp3') tick_per_sec = 60 * 1000 / bpm # build sequential beats on quarter notes: one tick contains 2 heart beats, # and this pattern covers only half a bar. # in conclusion, one bar has two sets of heart beats result_track = AudioSegment.empty() # first set result_track += heart_beat_1 gap = tick_per_sec / 2 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # fill the gap gap = tick_per_sec * 2 - len(result_track) result_track += AudioSegment.silent(gap) # # second set result_track += heart_beat_1 gap = tick_per_sec * 2.5 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # # fill the end gap gap = tick_per_sec * 4 - len(result_track) result_track += AudioSegment.silent(gap) return result_track def get_heart_beat_track(filename: str, bar_count: int, bpm: int): result = AudioSegment.empty() for i in range(bar_count): result += get_one_bar_heart_beat(filename, bpm) return result def get_heart_beat_track_and_save(filename: str, dest_filename: str, bar_count: int, bpm: int): result = get_heart_beat_track(filename, bar_count, bpm) # reduce the result by 3 dB result = result - 3 # tick_per_sec = 60 * 1000 / bpm # fade_time = round(tick_per_sec * 4) # result.fade_in(fade_time) # result.fade_out(fade_time) AudioSegment.export(result, dest_filename) REF_BPM = 90 BOTTOM_BPM = 50 def normalize_bpm(bpm: int): """ Presume that the bpm detected from a voice recording generally falls in the range 50 - 200, and that we want to re-center the speed around 90 as a reference. The algorithm is: gap = abs(90 - x); result = 50 + gap. :param bpm: detected bpm :return: normalized bpm """ return abs(REF_BPM - bpm) + BOTTOM_BPM
ompute_density_from_pitch_result(
identifier_name
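As a toy illustration of the 0-9 scale produced by compute_density_level above, the following sketch applies the same min/max/gap logic to a handful of made-up group densities (the numbers are invented, not taken from any recording):

import numpy

# Made-up group sizes (voiced pitch frames per group); their log10 densities are
# mapped onto nine steps between the smallest and the largest group.
density_list = [3, 12, 48, 7, 150]
log_density_list = numpy.log10(density_list)

min_val, max_val = min(log_density_list), max(log_density_list)
gap = (max_val - min_val) / 9  # nine steps between level 0 and level 9

levels = [round((d - min_val) / gap) if gap != 0 else 5 for d in log_density_list]
print(levels)  # the smallest group lands on level 0, the largest on level 9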
utils.py
import os from typing import List import numpy from aubio import notes, source, pitch, tempo from midiutil import MIDIFile from midiutil.MidiFile import TICKSPERQUARTERNOTE, NoteOn from pydub import AudioSegment from model.channel import channel_map, CHANNEL_NAME_DRUM_KIT from model.note import Note, drum_map def create_midi_file(num_tracks: int, file_format: int): return MIDIFile(numTracks=num_tracks, file_format=file_format) def save_midi_file(filename: str, midi_file: MIDIFile): with open(filename, "wb") as output_file: midi_file.writeFile(output_file) print("====> file %s saved." % filename) DEFAULT_SAMPLE_RATE = 44100 DOWN_SAMPLE = 1 def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read notes from a sound wave file with a list of dict of start_time, pitch and duration """ print("====> reading notes from sound file") win_s = 512 // DOWN_SAMPLE # fft size hop_s = 256 // DOWN_SAMPLE # hop size # adjust sample rate s = source(filename, samplerate, hop_s) samplerate = s.samplerate notes_o = notes("default", win_s, hop_s, samplerate) result = [] total_frames = 0 while True: samples, read = s() new_note = notes_o(samples) # note too high considered as noise if new_note[0] != 0 and new_note[0] <= 120: note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20, duration=new_note[2]) result.append(note_klass) total_frames += read if read < hop_s: break return result def read_bpm_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): print("====> reading bpm from sound file") win_s, hop_s = 1024, 512 s = source(filename, samplerate, hop_s) samplerate = s.samplerate ''' phase Phase based onset detection function This function uses information both in frequency and in phase to determine changes in the spectral content that might correspond to musical onsets. It is best suited for complex signals such as polyphonic recordings. Juan-Pablo Bello, Mike P. Davies, and Mark B. Sandler. Phase-based note onset detection for music signals. In Proceedings of the IEEE International Conference on Acoustics Speech and Signal Processing, pages 441­444, Hong-Kong, 2003. ''' o = tempo("phase", win_s, hop_s, samplerate) beats = [] total_frames = 0 while True: samples, read = s() is_beat = o(samples) if is_beat: this_beat = o.get_last_s() beats.append(this_beat) # if o.get_confidence() > .2 and len(beats) > 2.: # break total_frames += read if read < hop_s: break def beats_to_bpm(beats, path): # if enough beats are found, convert to periods then to bpm if len(beats) > 1: if len(beats) < 4: print("few beats found in {:s}".format(path)) bpms = 60. 
/ numpy.diff(beats) return numpy.median(bpms) else: print("not enough beats found in {:s}".format(path)) return 0 return beats_to_bpm(beats, filename) def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): """ this method try to read pitches from a sound wave file with a list of dict of pitch and confidence """ if os.path.isfile(filename) is False: raise Exception('File not found with filename = %s' % filename) print("====> reading pitch from sound file") win_s = 4096 // DOWN_SAMPLE # fft size hop_s = 512 // DOWN_SAMPLE # hop size s = source(filename, samplerate, hop_s) samplerate = s.samplerate tolerance = 0.8 pitch_o = pitch("yin", win_s, hop_s, samplerate) pitch_o.set_unit("midi") pitch_o.set_tolerance(tolerance) result = [] # total number of frames read total_frames = 0 while True: samples, read = s() # the pitch value is not rounded and many zeroes occur that_pitch = pitch_o(samples)[0] confidence = pitch_o.get_confidence() result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence)) total_frames += read if read < hop_s: break group_result_with_log_density = compute_density_from_pitch_result(result) density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time']) print("====> density level list length %s" % len(density_level_list)) proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time']) print("====> emphasis proportion list length = %d" % len(proportion_list)) return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list) def compute_density_level(group_result_with_log_density: List[dict], length: float): """ following result of function compute_density_from_pitch_result, this method will compute for each group, a readable (from 0 to 9) density value for further usage :param group_result_with_log_density: :param length end time :return: """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_val = max(log_density_list) min_val = min(log_density_list) # split range with 10 and compute which to where range_val = max_val - min_val total_level = 9 gap = range_val / total_level level_list = [] for i, log_density in enumerate(log_density_list): level = 5 if gap != 0: level = round((log_density - min_val) / gap) level_list.append(dict(level=level, start_time=group_result_with_log_density[i]['pitches'][0]['time'])) for level_dict in level_list: start = level_dict['start_time'] / length level_dict['start_time'] = start return level_list def compute_density_from_pitch_result(pitch_result: List[dict]): group_result = [] group = [] for i, pitch_dict in enumerate(pitch_result): # current is not zero, but previous is zero # should flush the group if round(pitch_dict['pitch']) != 0 and i - 1 >= 0 and round(pitch_result[i - 1]['pitch']) == 0: group_result.append(group) group = [] group.append(pitch_dict) # now for each group we have the elements which are essentially divided by time frame # we just need to identify the average density and get the highest ones density_list = [len(group) for group in group_result] # average_density = sum(density_list) / len(density_list) log_density_list = numpy.log10(density_list) # only for those group with density > coefficient * log_max_density is qualified to be the emphasis one. 
# but here we just give the density log result group_result_with_log_density = [] for i, group in enumerate(group_result): group_result_with_log_density.append(dict(log_density=log_density_list[i], pitches=group)) return group_result_with_log_density def get_emphasis_start_times(group_result_with_log_density: List[dict], length: float, coefficient: int = 0.8, threshold: int = 1): """ :param group_result_with_log_density compute_density_from_pitch_result function result :param coefficient compares to the max log value, which should we consider emphasis :param threshold means only pitch density more than threshold could use emphasis method :param length is the length of sound in second unit """ log_density_list = [group['log_density'] for group in group_result_with_log_density] max_log_density = max(log_density_list) filter_value = coefficient * max_log_density pitch_group_list = [] for group in group_result_with_log_density: if group['log_density'] >= threshold and group['log_density'] >= filter_value: pitch_group_list.append(group['pitches']) # now we have pitch group, we can know where to start emphasis and where to end range_time_list = [] for pitch_group in pitch_group_list: s
# transform proportion value for further beats computing proportion_list = [] for range_time in range_time_list: start = range_time['start'] / length end = range_time['end'] / length proportion_list.append(dict(start=start, end=end)) return proportion_list def drum_note_to_heart_beat_track(midi_instance: MIDIFile): """ @Deprecated """ # exporting bass drum notes bass_drum_beats_in_ms = [] ms_per_tick = 60 * 1000 / (tempo * TICKSPERQUARTERNOTE) for event in midi_instance.tracks[channel_map[CHANNEL_NAME_DRUM_KIT]].eventList: if isinstance(event, NoteOn) and event.pitch == drum_map['BassDrum']: bass_drum_beats_in_ms.append(ms_per_tick * event.tick) single_heart_beat = AudioSegment.from_file('./single_heartbeat.mp3', format='mp3') heartbeat_track = AudioSegment.empty() for i, bass_drum_beat_note_on in enumerate(bass_drum_beats_in_ms): if i == 0: heartbeat_track += AudioSegment.silent(duration=bass_drum_beat_note_on) elif i + 1 < len(bass_drum_beats_in_ms): # if the next bass drum time is early than heartbeat track if len(heartbeat_track) > bass_drum_beats_in_ms[i + 1]: continue # fill the gap till the next heart beat gap = bass_drum_beats_in_ms[i + 1] - len(heartbeat_track) heartbeat_track += AudioSegment.silent(duration=gap) elif i == len(bass_drum_beats_in_ms) - 1: # ignore the last one continue heartbeat_track += single_heart_beat heartbeat_track.export('heartbeat_track.mp3', format='mp3') def get_one_bar_heart_beat(filename: str, bpm: int): """ given defined bpm, it generates a bar of heartbeat sound. given the fact that the heart beat track has a certain length of each beat, the bpm cannot be too high, which is undetermined yet. :return: """ heart_beat_track = AudioSegment.from_file(file=filename, format='mp3') heart_beat_1 = heart_beat_track[70:180] heart_beat_2 = heart_beat_track[380:490] # AudioSegment.export(part, 'single_heartbeat1.mp3') tick_per_sec = 60 * 1000 / bpm # make a sequential beats by a quarter notes which means a tick contains 2 heat beats # and this is only applied for a half bar. # in conclusion, one bar has two sets of heart beats result_track = AudioSegment.empty() # first set result_track += heart_beat_1 gap = tick_per_sec / 2 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # fill the gap gap = tick_per_sec * 2 - len(result_track) result_track += AudioSegment.silent(gap) # # second set result_track += heart_beat_1 gap = tick_per_sec * 2.5 - len(result_track) result_track += AudioSegment.silent(gap) result_track += heart_beat_2 # # fill the end gap gap = tick_per_sec * 4 - len(result_track) result_track += AudioSegment.silent(gap) return result_track def get_heart_beat_track(filename: str, bar_count: int, bpm: int): result = AudioSegment.empty() for i in range(bar_count): result += get_one_bar_heart_beat(filename, bpm) return result def get_heart_beat_track_and_save(filename: str, dest_filename: str, bar_count: int, bpm: int): result = get_heart_beat_track(filename, bar_count, bpm) # reduce 3dB of the result result = result - 3 # tick_per_sec = 60 * 1000 / bpm # fade_time = round(tick_per_sec * 4) # result.fade_in(fade_time) # result.fade_out(fade_time) AudioSegment.export(result, dest_filename) REF_BPM = 90 BOTTOM_BPM = 50 def normalize_bpm(bpm: int): """ presume that general bpm of voice result is in range of 50 - 200 and we may want to center speed to 90 as reference. so the algorithm could be: gap = abs(90 - x) result = 50 + gap :param bpm: :return: """ return abs(REF_BPM - bpm) + BOTTOM_BPM
tart = pitch_group[0]['time'] end = pitch_group[len(pitch_group) - 1]['time'] range_time_list.append(dict(start=start, end=end))
conditional_block
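The emphasis detection in get_emphasis_start_times above can be illustrated with a small made-up example; the filtering and proportion steps are condensed into one comprehension here, but the logic (threshold plus the coefficient * max_log_density filter, then start/end divided by the total length) is the same:

# Toy data: two voiced groups, only the denser one should survive the filter.
length = 10.0  # total length of the recording in seconds
groups = [
    {"log_density": 0.5, "pitches": [{"time": 1.0}, {"time": 1.4}]},
    {"log_density": 2.0, "pitches": [{"time": 6.0}, {"time": 7.5}]},
]
coefficient, threshold = 0.8, 1
filter_value = coefficient * max(g["log_density"] for g in groups)  # 1.6

proportions = [
    {"start": g["pitches"][0]["time"] / length, "end": g["pitches"][-1]["time"] / length}
    for g in groups
    if g["log_density"] >= threshold and g["log_density"] >= filter_value
]
print(proportions)  # [{'start': 0.6, 'end': 0.75}]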
save_articles.go
package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "net/http" "os" "time" _ "github.com/lib/pq" "github.com/jmoiron/sqlx" // _ "github.com/mattn/go-sqlite3" ) // Ия файла базы данных SQLite var dbFileName = "rg.db" // DSN параметры подсоединения к postgresql var DSN = os.Getenv("RGDSN") // Конечная точка API для получения текста материала. См. https://works.rg.ru/project/docs/?section=8 var urlArticle = "https://outer.rg.ru/plain/proxy/?query=https://rg.ru/api/get/object/article-%v.json" // Таймаут запросов к API var requestTimeout = 30 func main() { // считать параметры командной строки // batchSize Количество одновременных запросов к API. // showTiming Показывать времена исполнения batchSize, showTiming := readCommandLineParams() // Порождаем таблицу articles если ее нет createArticlesTable() // Заполняем ее пустыми записями с идентификаторами из таблицы связей rubrics_articles fillArticlesWithIds() // Считаем количество новых записей в articles n := getNewRecordsNumber() fmt.Printf("Количество новых записей в таблице articles = %d.\n", n) // Заполняем таблицу articles текстами из API fillArticlesWithTexts(n, batchSize, showTiming) fmt.Println("DONE") } // readCommandLineParams читает параметры командной строки func readCommandLineParams() (batchSize int, showTiming bool) { flag.IntVar(&batchSize, "batchSize", 50, "Количество запросов выполняемых одновременно") // flag.StringVar(&status, "status", "", "Значение поля migration_status обновляемых записей") flag.BoolVar(&showTiming, "showTiming", false, "Показывать времена исполнения") flag.Parse() // flag.Usage() if batchSize == 0 { os.Exit(0) } return } // Порождает таблицу articles в базе данных func createArticlesTable() { sqlCreateArticles := ` CREATE TABLE IF NOT EXISTS articles ( obj_id text PRIMARY KEY, announce text NULL, authors text NULL, date_modified text NULL, "full-text" text NULL, images text NULL, index_priority text NULL, is_active text NULL, is_announce text NULL, is_paid text NULL, link_title text NULL, links text NULL, obj_kind text NULL, projects text NULL, release_date text NULL, spiegel text NULL, title text NULL, uannounce text NULL, url text NULL, migration_status text NULL, -- DEFAULT ''::text, process_status text NULL, elastic_status text NULL, lemmatized_text text NULL, entities_text text NULL, entities_grouped text NULL ); CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status); CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status); CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status); ` mustExec(sqlCreateArticles) fmt.Println("Таблица articles создана. Вставка новых записей ...") } // Заполняет таблицу articles идентификаторами статей полученными // из таблицы связей rubrics_objects func fillArticlesWithIds() { startTime := time.Now() sqlFillArticlesWithIds := ` INSERT INTO articles(obj_id) SELECT DISTINCT rubrics_objects.object_id FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id WHERE articles.obj_id IS NULL AND rubrics_objects.kind = 'article' ON CONFLICT (obj_id) DO NOTHING ; ` mustExec(sqlFillArticlesWithIds) fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime)) } // Заполняет таблицу articles текстами из API. // - n - полное количество новых записей. For info only. // - batchSize - Количество одновременных запросов к API. 
// - showTiming - Показывать времена исполнения func fillArticlesWithTexts(n, batchSize int, showTiming bool) { // время отдыха между порциями запросов var sleepTime = 50 * time.Millisecond // Счетчик сделанных запросов counter := 0 //Время начала процесса startTime := time.Now() //Берем первую порцию идентификаторов из таблицы articles ids := getArticleIds(batchSize, showTiming) // Пока в порции в порции есть идентификаторы for len(ids) > 0 { //Запрашиваем тексты статей articleTexts := getAPITextsParallel(ids, showTiming) // преобразовываем тексты в записи - массивы полей материала articleRecords := textsToArticleRecords(articleTexts) // Сохраняем записи в базу данных saveArticlesToDatabase(articleRecords, showTiming) // Выводим сообщение counter += len(ids) duration := time.Since(startTime) durationHours := float64(duration) / float64(time.Hour) articlesPerHour := float64(counter) / durationHours fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour) // отдыхаем time.Sleep(sleepTime) // Берем следующую порцию идентификаторов ids = getArticleIds(batchSize, showTiming) } } // Получает количество новых записей в таблице articles, // где поле migration_status имеет значение NULL. func getNewRecordsNumber() int { db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]int, 0) err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL") checkErr(err) err = db.Close() checkErr(err) return ids[0] } // Получает массив идентификаторов (размером не более limit) статей из базы данных, // в которых поле migration_status имеет значение NULL. func getArticleIds(limit int, showTiming bool) []string { startTime := time.Now() // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]string, 0) err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit)) checkErr(err) // закомментированный код работает тоже в том числе для sqllite3 // rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit)) // checkErr(err) // var id string // for rows.Next() { // err = rows.Scan(&id) // checkErr(err) // ids = append(ids, id) // } // rows.Close() //good habit to close err = db.Close() checkErr(err) if showTiming { fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime)) } return ids } // Делает последовательные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITexts(ids []string) [][]string { // startTime := time.Now() articles := make([][]string, 0) for _, id := range ids { articles = append(articles, getOneArticleFromAPI(id)) } // duration := time.Since(startTime) // fmt.Printf("Got %v articles in %v. \n", len(ids), duration) return articles } // Делает параллельные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITextsParallel(ids []string, showTiming bool) [][]string { startTime := time.Now() articles := make([][]string, 0) ch := make(chan []string) for _, id := range ids { go func(id string) { ch <- getOneArticleFromAPI(id) }(id) } for range ids { v := <-ch articles = append(articles, v) } close(ch) if showTiming { fmt.Printf("Got %v articles in %v. 
\n", len(ids), time.Since(startTime)) } return articles } // Возвращает id материала и его текст в виде [id, text] из API func getOneArticleFromAPI(id string) []string { client := http.Client{ Timeout: time.Duration(requestTimeout) * time.Second, } req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil) if err != nil { fmt.Println(err) } req.Close = true req.Header.Set("Connection", "close") resp, err := client.Do(req) // resp, err := http.Get(fmt.Sprintf(urlArticle, id)) if err != nil { fmt.Println(err) return []string{id, ""} } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { fmt.Println(err) return []string{id, ""} } s := string(body) return []string{id, s} } // Преобразует массив текстов в массив записей. // Запись это отображение: имя_поля -> значение_поля func textsToArticleRecords(texts [][]string) []map[string]interface{} { records := make([]map[string]interface{}, 0) for _, o := range texts { id := o[0] text := o[1] // record := map[string]string{"obj_id": id} var objmap map[string]interface{} //json.RawMessage err := json.Unmarshal([]byte(text), &objmap) if err != nil { fmt.Println(err) objmap = make(map[string]interface{}) objmap["obj_id"] = id objmap["migration_status"] = "error" } else { objmap["migration_status"] = "success" } records = append(records, objmap) } return records } // Сохраняет массив записей в базу данных. // Запись представляет собой map[string]interface{}. func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) { startTime := time.Now() paramsArray := make([][]interface{}, 0) for _, record := range records { params := make([]interface{}, 0) params = append(params, getMapVal(record, "announce")) params = append(params, getMapVal(record, "authors")) params = append(params, getMapVal(record, "date_modified")) params = append(params, getMapVal(record, "full-text")) params = append(params, getMapVal(record, "images")) params = append(params, getMapVal(record, "index_priority")) params = append(params, getMapVal(record, "is_active")) params = append(params, getMapVal(record, "is_announce")) params = append(params, getMapVal(record, "is_paid")) params = append(params, getMapVal(record, "link_title")) params = append(params, getMapVal(record, "links")) params = append(params, getMapVal(record, "obj_kind")) params = append(params, getMapVal(record, "projects")) params = append(params, getMapVal(record, "release_date")) params = append(params, getMapVal(record, "spiegel")) params = append(params, getMapVal(record, "title")) params = append(params, getMapVal(record, "uannounce")) params = append(params, getMapVal(record, "url")) params = append(params, getMapVal(record, "migration_status")) params = append(params, getMapVal(record, "obj_id")) paramsArray = append(paramsArray, params) } sqlUpdate := ` UPDATE articles SET announce = $1, authors = $2, date_modified = $3, "full-text" = $4, images = $5, index_priority = $6, is_active = $7, is_announce = $8, is_paid = $9, link_title = $10, links = $11, obj_kind = $12, projects = $13, release_date = $14, spiegel = $15, title = $16, uannounce = $17, url = $18, migration_status = $19 WHERE obj_id = $20 ` execMany(sqlUpdate, paramsArray) if showTiming { fmt.Printf("Saved %v articles to database in %v. \n", len(records), time.Since(startTime)) } } // Получает значение поля из отображения. // Возвращает NULL в случае отсутствия поля, // и тестовое представление если поле содержит JSON. 
func getMapVal(m map[string]interface{}, key string) interface{} { v, ok := m[key] if !ok { return nil } s, ok := v.(string) if ok { return s } b, err := json.Marshal(v) if err == nil { return string(b) } return "something bad" } // Исполняет запрос к базе данных. For all kinds of databases. func exec(sqlText string) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) defer stmt.Close() checkErr(err) _, err = stmt.Exec() checkErr(err) } // Исполняет запрос к базе данных. Specific to postgresql. func mustExec(sqlText string) { db, err := sqlx.Open("postgres", DSN) defer db.Close() if err != nil { log.Fatalln(err) } db.MustExec(sqlText) } // Исполняет несколько параметризованных запросов на обновление или вставку. // Если запрос не прошел, печатает сообщение. func execMany(sqlText string, paramsArray [][]interface{}) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) // defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) checkErr(err) for _, params := range paramsArray { // fmt.Println("params length================", len(params)) res, err := stmt.Exec(params...) checkErr(err) // Если запрос не затронул ни одну запись, выводим сообщение. affect, err := res.RowsAffected() checkErr(err) if affect == 0 { fmt.Println("Affected->", affect) } } err = stmt.Close() checkErr(err) err = db.Close() checkErr(err) } // Печатаем сообщение об ошибке func checkErr(err error) { if err != nil { fmt.Print(err) } }
conditional_block
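getAPITextsParallel above fans one goroutine out per id and collects the [id, text] pairs from a channel. For comparison only, here is a rough sketch of the same fan-out/fan-in step in Python (the other language in this dump), with a hypothetical fetch_article helper standing in for the HTTP call:

from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch_article(article_id: str) -> list:
    # hypothetical stand-in for the HTTP GET against the article endpoint
    return [article_id, "{}"]

def get_api_texts_parallel(ids: list) -> list:
    if not ids:
        return []
    # one worker per id; results are collected as they complete, so the order
    # may differ from the input order, just like the channel loop in the Go code
    with ThreadPoolExecutor(max_workers=len(ids)) as pool:
        futures = [pool.submit(fetch_article, i) for i in ids]
        return [f.result() for f in as_completed(futures)]

print(get_api_texts_parallel(["1", "2", "3"]))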
save_articles.go
package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "net/http" "os" "time" _ "github.com/lib/pq" "github.com/jmoiron/sqlx" // _ "github.com/mattn/go-sqlite3" ) // Ия файла базы данных SQLite var dbFileName = "rg.db" // DSN параметры подсоединения к postgresql var DSN = os.Getenv("RGDSN") // Конечная точка API для получения текста материала. См. https://works.rg.ru/project/docs/?section=8 var urlArticle = "https://outer.rg.ru/plain/proxy/?query=https://rg.ru/api/get/object/article-%v.json" // Таймаут запросов к API var requestTimeout = 30 func main() { // считать параметры командной строки // batchSize Количество одновременных запросов к API. // showTiming Показывать времена исполнения batchSize, showTiming := readCommandLineParams() // Порождаем таблицу articles если ее нет createArticlesTable() // Заполняем ее пустыми записями с идентификаторами из таблицы связей rubrics_articles fillArticlesWithIds() // Считаем количество новых записей в articles n := getNewRecordsNumber() fmt.Printf("Количество новых записей в таблице articles = %d.\n", n) // Заполняем таблицу articles текстами из API fillArticlesWithTexts(n, batchSize, showTiming) fmt.Println("DONE") } // readCommandLineParams читает параметры командной строки func readCommandLineParams() (batchSize int, showTiming bool) { flag.IntVar(&batchSize, "batchSize", 50, "Количество запросов выполняемых одновременно") // flag.StringVar(&status, "status", "", "Значение поля migration_status обновляемых записей") flag.BoolVar(&showTiming, "showTiming", false, "Показывать времена исполнения") flag.Parse() // flag.Usage() if batchSize == 0 { os.Exit(0) } return } // Порождает таблицу articles в базе данных func createArticlesTable() { sqlCreateArticles := ` CREATE TABLE IF NOT EXISTS articles ( obj_id text PRIMARY KEY, announce text NULL, authors text NULL, date_modified text NULL, "full-text" text NULL, images text NULL, index_priority text NULL, is_active text NULL, is_announce text NULL, is_paid text NULL, link_title text NULL, links text NULL, obj_kind text NULL, projects text NULL, release_date text NULL, spiegel text NULL, title text NULL, uannounce text NULL, url text NULL, migration_status text NULL, -- DEFAULT ''::text, process_status text NULL, elastic_status text NULL, lemmatized_text text NULL, entities_text text NULL, entities_grouped text NULL ); CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status); CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status); CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status); ` mustExec(sqlCreateArticles) fmt.Println("Таблица articles создана. Вставка новых записей ...") } // Заполняет таблицу articles идентификаторами статей полученными // из таблицы связей rubrics_objects func fillArticlesWithIds() { startTime := time.Now() sqlFillArticlesWithIds := ` INSERT INTO articles(obj_id) SELECT DISTINCT rubrics_objects.object_id FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id WHERE articles.obj_id IS NULL AND rubrics_objects.kind = 'article' ON CONFLICT (obj_id) DO NOTHING ; ` mustExec(sqlFillArticlesWithIds) fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime)) } // Заполняет таблицу articles текстами из API. // - n - полное количество новых записей. For info only. // - batchSize - Количество одновременных з
тексты статей articleTexts := getAPITextsParallel(ids, showTiming) // преобразовываем тексты в записи - массивы полей материала articleRecords := textsToArticleRecords(articleTexts) // Сохраняем записи в базу данных saveArticlesToDatabase(articleRecords, showTiming) // Выводим сообщение counter += len(ids) duration := time.Since(startTime) durationHours := float64(duration) / float64(time.Hour) articlesPerHour := float64(counter) / durationHours fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour) // отдыхаем time.Sleep(sleepTime) // Берем следующую порцию идентификаторов ids = getArticleIds(batchSize, showTiming) } } // Получает количество новых записей в таблице articles, // где поле migration_status имеет значение NULL. func getNewRecordsNumber() int { db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]int, 0) err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL") checkErr(err) err = db.Close() checkErr(err) return ids[0] } // Получает массив идентификаторов (размером не более limit) статей из базы данных, // в которых поле migration_status имеет значение NULL. func getArticleIds(limit int, showTiming bool) []string { startTime := time.Now() // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]string, 0) err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit)) checkErr(err) // закомментированный код работает тоже в том числе для sqllite3 // rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit)) // checkErr(err) // var id string // for rows.Next() { // err = rows.Scan(&id) // checkErr(err) // ids = append(ids, id) // } // rows.Close() //good habit to close err = db.Close() checkErr(err) if showTiming { fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime)) } return ids } // Делает последовательные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITexts(ids []string) [][]string { // startTime := time.Now() articles := make([][]string, 0) for _, id := range ids { articles = append(articles, getOneArticleFromAPI(id)) } // duration := time.Since(startTime) // fmt.Printf("Got %v articles in %v. \n", len(ids), duration) return articles } // Делает параллельные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITextsParallel(ids []string, showTiming bool) [][]string { startTime := time.Now() articles := make([][]string, 0) ch := make(chan []string) for _, id := range ids { go func(id string) { ch <- getOneArticleFromAPI(id) }(id) } for range ids { v := <-ch articles = append(articles, v) } close(ch) if showTiming { fmt.Printf("Got %v articles in %v. 
\n", len(ids), time.Since(startTime)) } return articles } // Возвращает id материала и его текст в виде [id, text] из API func getOneArticleFromAPI(id string) []string { client := http.Client{ Timeout: time.Duration(requestTimeout) * time.Second, } req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil) if err != nil { fmt.Println(err) } req.Close = true req.Header.Set("Connection", "close") resp, err := client.Do(req) // resp, err := http.Get(fmt.Sprintf(urlArticle, id)) if err != nil { fmt.Println(err) return []string{id, ""} } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { fmt.Println(err) return []string{id, ""} } s := string(body) return []string{id, s} } // Преобразует массив текстов в массив записей. // Запись это отображение: имя_поля -> значение_поля func textsToArticleRecords(texts [][]string) []map[string]interface{} { records := make([]map[string]interface{}, 0) for _, o := range texts { id := o[0] text := o[1] // record := map[string]string{"obj_id": id} var objmap map[string]interface{} //json.RawMessage err := json.Unmarshal([]byte(text), &objmap) if err != nil { fmt.Println(err) objmap = make(map[string]interface{}) objmap["obj_id"] = id objmap["migration_status"] = "error" } else { objmap["migration_status"] = "success" } records = append(records, objmap) } return records } // Сохраняет массив записей в базу данных. // Запись представляет собой map[string]interface{}. func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) { startTime := time.Now() paramsArray := make([][]interface{}, 0) for _, record := range records { params := make([]interface{}, 0) params = append(params, getMapVal(record, "announce")) params = append(params, getMapVal(record, "authors")) params = append(params, getMapVal(record, "date_modified")) params = append(params, getMapVal(record, "full-text")) params = append(params, getMapVal(record, "images")) params = append(params, getMapVal(record, "index_priority")) params = append(params, getMapVal(record, "is_active")) params = append(params, getMapVal(record, "is_announce")) params = append(params, getMapVal(record, "is_paid")) params = append(params, getMapVal(record, "link_title")) params = append(params, getMapVal(record, "links")) params = append(params, getMapVal(record, "obj_kind")) params = append(params, getMapVal(record, "projects")) params = append(params, getMapVal(record, "release_date")) params = append(params, getMapVal(record, "spiegel")) params = append(params, getMapVal(record, "title")) params = append(params, getMapVal(record, "uannounce")) params = append(params, getMapVal(record, "url")) params = append(params, getMapVal(record, "migration_status")) params = append(params, getMapVal(record, "obj_id")) paramsArray = append(paramsArray, params) } sqlUpdate := ` UPDATE articles SET announce = $1, authors = $2, date_modified = $3, "full-text" = $4, images = $5, index_priority = $6, is_active = $7, is_announce = $8, is_paid = $9, link_title = $10, links = $11, obj_kind = $12, projects = $13, release_date = $14, spiegel = $15, title = $16, uannounce = $17, url = $18, migration_status = $19 WHERE obj_id = $20 ` execMany(sqlUpdate, paramsArray) if showTiming { fmt.Printf("Saved %v articles to database in %v. \n", len(records), time.Since(startTime)) } } // Получает значение поля из отображения. // Возвращает NULL в случае отсутствия поля, // и тестовое представление если поле содержит JSON. 
func getMapVal(m map[string]interface{}, key string) interface{} { v, ok := m[key] if !ok { return nil } s, ok := v.(string) if ok { return s } b, err := json.Marshal(v) if err == nil { return string(b) } return "something bad" } // Исполняет запрос к базе данных. For all kinds of databases. func exec(sqlText string) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) defer stmt.Close() checkErr(err) _, err = stmt.Exec() checkErr(err) } // Исполняет запрос к базе данных. Specific to postgresql. func mustExec(sqlText string) { db, err := sqlx.Open("postgres", DSN) defer db.Close() if err != nil { log.Fatalln(err) } db.MustExec(sqlText) } // Исполняет несколько параметризованных запросов на обновление или вставку. // Если запрос не прошел, печатает сообщение. func execMany(sqlText string, paramsArray [][]interface{}) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) // defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) checkErr(err) for _, params := range paramsArray { // fmt.Println("params length================", len(params)) res, err := stmt.Exec(params...) checkErr(err) // Если запрос не затронул ни одну запись, выводим сообщение. affect, err := res.RowsAffected() checkErr(err) if affect == 0 { fmt.Println("Affected->", affect) } } err = stmt.Close() checkErr(err) err = db.Close() checkErr(err) } // Печатаем сообщение об ошибке func checkErr(err error) { if err != nil { fmt.Print(err) } }
апросов к API. // - showTiming - Показывать времена исполнения func fillArticlesWithTexts(n, batchSize int, showTiming bool) { // время отдыха между порциями запросов var sleepTime = 50 * time.Millisecond // Счетчик сделанных запросов counter := 0 //Время начала процесса startTime := time.Now() //Берем первую порцию идентификаторов из таблицы articles ids := getArticleIds(batchSize, showTiming) // Пока в порции в порции есть идентификаторы for len(ids) > 0 { //Запрашиваем
identifier_body
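The progress line printed by fillArticlesWithTexts above reports an average speed in articles per hour, i.e. the counter divided by the elapsed time expressed in hours; the same arithmetic with made-up numbers:

import datetime

# Made-up numbers: 1200 articles processed in 30 minutes.
counter = 1200
elapsed = datetime.timedelta(minutes=30)

articles_per_hour = counter / (elapsed.total_seconds() / 3600)
print(round(articles_per_hour))  # 2400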
save_articles.go
package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "net/http" "os" "time" _ "github.com/lib/pq" "github.com/jmoiron/sqlx" // _ "github.com/mattn/go-sqlite3" ) // Ия файла базы данных SQLite var dbFileName = "rg.db" // DSN параметры подсоединения к postgresql var DSN = os.Getenv("RGDSN") // Конечная точка API для получения текста материала. См. https://works.rg.ru/project/docs/?section=8 var urlArticle = "https://outer.rg.ru/plain/proxy/?query=https://rg.ru/api/get/object/article-%v.json" // Таймаут запросов к API var requestTimeout = 30 func main() { // считать параметры командной строки // batchSize Количество одновременных запросов к API. // showTiming Показывать времена исполнения batchSize, showTiming := readCommandLineParams() // Порождаем таблицу articles если ее нет createArticlesTable() // Заполняем ее пустыми записями с идентификаторами из таблицы связей rubrics_articles fillArticlesWithIds() // Считаем количество новых записей в articles n := getNewRecordsNumber() fmt.Printf("Количество новых записей в таблице articles = %d.\n", n) // Заполняем таблицу articles текстами из API fillArticlesWithTexts(n, batchSize, showTiming) fmt.Println("DONE") } // readCommandLineParams читает параметры командной строки func readCommandLineParams() (batchSize int, showTiming bool) { flag.IntVar(&batchSize, "batchSize", 50, "Количество запросов выполняемых одновременно") // flag.StringVar(&status, "status", "", "Значение поля migration_status обновляемых записей") flag.BoolVar(&showTiming, "showTiming", false, "Показывать времена исполнения") flag.Parse() // flag.Usage() if batchSize == 0 { os.Exit(0) } return } // Порождает таблицу articles в базе данных func createArticlesTable() { sqlCreateArticles := ` CREATE TABLE IF NOT EXISTS articles ( obj_id text PRIMARY KEY, announce text NULL, authors text NULL, date_modified text NULL, "full-text" text NULL, images text NULL, index_priority text NULL, is_active text NULL, is_announce text NULL, is_paid text NULL, link_title text NULL, links text NULL, obj_kind text NULL, projects text NULL, release_date text NULL, spiegel text NULL, title text NULL, uannounce text NULL, url text NULL, migration_status text NULL, -- DEFAULT ''::text, process_status text NULL, elastic_status text NULL, lemmatized_text text NULL, entities_text text NULL, entities_grouped text NULL ); CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status); CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status); CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status); ` mustExec(sqlCreateArticles) fmt.Println("Таблица articles создана. Вставка новых записей ...") } // Заполняет таблицу articles идентификаторами статей полученными // из таблицы связей rubrics_objects func fillArticlesWithIds() { startTime := time.Now() sqlFillArticlesWithIds := ` INSERT INTO articles(obj_id) SELECT DISTINCT rubrics_objects.object_id FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id WHERE articles.obj_id IS NULL AND rubrics_objects.kind = 'article' ON CONFLICT (obj_id) DO NOTHING ; ` mustExec(sqlFillArticlesWithIds) fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime)) } // Заполняет таблицу articles текстами из API. // - n - полное количество новых записей. For info only. // - batchSize - Количество одновременных запросов к API. 
// - showTiming - Показывать времена исполнения func fillArticlesWithTexts(n, batchSize int, showTiming bool) { // время отдыха между порциями запросов var sleepTime = 50 * time.Millisecond // Счетчик сделанных запросов counter := 0 //Время начала процесса startTime := time.Now() //Берем первую порцию идентификаторов из таблицы articles ids := getArticleIds(batchSize, showTiming) // Пока в порции в порции есть идентификаторы for len(ids) > 0 { //Запрашиваем тексты статей articleTexts := getAPITextsParallel(ids, showTiming) // преобразовываем тексты в записи - массивы полей материала articleRecords := textsToArticleRecords(articleTexts) // Сохраняем записи в базу данных saveArticlesToDatabase(articleRecords, showTiming) // Выводим сообщение counter += len(ids) duration := time.Since(startTime) durationHours := float64(duration) / float64(time.Hour) articlesPerHour := float64(counter) / durationHours fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour) // отдыхаем time.Sleep(sleepTime) // Берем следующую порцию идентификаторов ids = getArticleIds(batchSize, showTiming) } } // Получает количество новых записей в таблице articles, // где поле migration_status имеет значение NULL. func getNewRecordsNumber() int { db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]int, 0) err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL") checkErr(err) err = db.Close() checkErr(err) return ids[0] } // Получает массив идентификаторов (размером не более limit) статей из базы данных, // в которых поле migration_status имеет значение NULL. func getArticleIds(limit int, showTiming bool) []string { startTime := time.Now() // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]string, 0) err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit)) checkErr(err) // закомментированный код работает тоже в том числе для sqllite3 // rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit)) // checkErr(err) // var id string // for rows.Next() { // err = rows.Scan(&id) // checkErr(err) // ids = append(ids, id) // } // rows.Close() //good habit to close err = db.Close() checkErr(err) if showTiming { fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime)) } return ids } // Делает последовательные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITexts(ids []string) [][]string { // startTime := time.Now() articles := make([][]string, 0) for _, id := range ids { articles = append(articles, getOneArticleFromAPI(id)) } // duration := time.Since(startTime) // fmt.Printf("Got %v articles in %v. \n", len(ids), duration) return articles } // Дел
ные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITextsParallel(ids []string, showTiming bool) [][]string { startTime := time.Now() articles := make([][]string, 0) ch := make(chan []string) for _, id := range ids { go func(id string) { ch <- getOneArticleFromAPI(id) }(id) } for range ids { v := <-ch articles = append(articles, v) } close(ch) if showTiming { fmt.Printf("Got %v articles in %v. \n", len(ids), time.Since(startTime)) } return articles } // Возвращает id материала и его текст в виде [id, text] из API func getOneArticleFromAPI(id string) []string { client := http.Client{ Timeout: time.Duration(requestTimeout) * time.Second, } req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil) if err != nil { fmt.Println(err) } req.Close = true req.Header.Set("Connection", "close") resp, err := client.Do(req) // resp, err := http.Get(fmt.Sprintf(urlArticle, id)) if err != nil { fmt.Println(err) return []string{id, ""} } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { fmt.Println(err) return []string{id, ""} } s := string(body) return []string{id, s} } // Преобразует массив текстов в массив записей. // Запись это отображение: имя_поля -> значение_поля func textsToArticleRecords(texts [][]string) []map[string]interface{} { records := make([]map[string]interface{}, 0) for _, o := range texts { id := o[0] text := o[1] // record := map[string]string{"obj_id": id} var objmap map[string]interface{} //json.RawMessage err := json.Unmarshal([]byte(text), &objmap) if err != nil { fmt.Println(err) objmap = make(map[string]interface{}) objmap["obj_id"] = id objmap["migration_status"] = "error" } else { objmap["migration_status"] = "success" } records = append(records, objmap) } return records } // Сохраняет массив записей в базу данных. // Запись представляет собой map[string]interface{}. 
func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) { startTime := time.Now() paramsArray := make([][]interface{}, 0) for _, record := range records { params := make([]interface{}, 0) params = append(params, getMapVal(record, "announce")) params = append(params, getMapVal(record, "authors")) params = append(params, getMapVal(record, "date_modified")) params = append(params, getMapVal(record, "full-text")) params = append(params, getMapVal(record, "images")) params = append(params, getMapVal(record, "index_priority")) params = append(params, getMapVal(record, "is_active")) params = append(params, getMapVal(record, "is_announce")) params = append(params, getMapVal(record, "is_paid")) params = append(params, getMapVal(record, "link_title")) params = append(params, getMapVal(record, "links")) params = append(params, getMapVal(record, "obj_kind")) params = append(params, getMapVal(record, "projects")) params = append(params, getMapVal(record, "release_date")) params = append(params, getMapVal(record, "spiegel")) params = append(params, getMapVal(record, "title")) params = append(params, getMapVal(record, "uannounce")) params = append(params, getMapVal(record, "url")) params = append(params, getMapVal(record, "migration_status")) params = append(params, getMapVal(record, "obj_id")) paramsArray = append(paramsArray, params) } sqlUpdate := ` UPDATE articles SET announce = $1, authors = $2, date_modified = $3, "full-text" = $4, images = $5, index_priority = $6, is_active = $7, is_announce = $8, is_paid = $9, link_title = $10, links = $11, obj_kind = $12, projects = $13, release_date = $14, spiegel = $15, title = $16, uannounce = $17, url = $18, migration_status = $19 WHERE obj_id = $20 ` execMany(sqlUpdate, paramsArray) if showTiming { fmt.Printf("Saved %v articles to database in %v. \n", len(records), time.Since(startTime)) } } // Получает значение поля из отображения. // Возвращает NULL в случае отсутствия поля, // и тестовое представление если поле содержит JSON. func getMapVal(m map[string]interface{}, key string) interface{} { v, ok := m[key] if !ok { return nil } s, ok := v.(string) if ok { return s } b, err := json.Marshal(v) if err == nil { return string(b) } return "something bad" } // Исполняет запрос к базе данных. For all kinds of databases. func exec(sqlText string) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) defer stmt.Close() checkErr(err) _, err = stmt.Exec() checkErr(err) } // Исполняет запрос к базе данных. Specific to postgresql. func mustExec(sqlText string) { db, err := sqlx.Open("postgres", DSN) defer db.Close() if err != nil { log.Fatalln(err) } db.MustExec(sqlText) } // Исполняет несколько параметризованных запросов на обновление или вставку. // Если запрос не прошел, печатает сообщение. func execMany(sqlText string, paramsArray [][]interface{}) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) // defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) checkErr(err) for _, params := range paramsArray { // fmt.Println("params length================", len(params)) res, err := stmt.Exec(params...) checkErr(err) // Если запрос не затронул ни одну запись, выводим сообщение. 
affect, err := res.RowsAffected() checkErr(err) if affect == 0 { fmt.Println("Affected->", affect) } } err = stmt.Close() checkErr(err) err = db.Close() checkErr(err) } // Печатаем сообщение об ошибке func checkErr(err error) { if err != nil { fmt.Print(err) } }
ает параллель
identifier_name
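getMapVal above falls back from a missing field (stored as NULL) to the raw string to a JSON dump of any other value; a rough Python rendering of that chain, for illustration only:

import json

def get_map_val(record: dict, key: str):
    # missing field -> None (stored as NULL), strings pass through unchanged,
    # anything else (lists, nested objects) is serialised back to JSON text
    if key not in record:
        return None
    value = record[key]
    if isinstance(value, str):
        return value
    return json.dumps(value)

record = {"title": "hello", "authors": ["a", "b"]}
print(get_map_val(record, "title"))    # hello
print(get_map_val(record, "authors"))  # ["a", "b"]
print(get_map_val(record, "missing"))  # None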
save_articles.go
package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "net/http" "os" "time" _ "github.com/lib/pq" "github.com/jmoiron/sqlx" // _ "github.com/mattn/go-sqlite3" ) // Ия файла базы данных SQLite var dbFileName = "rg.db" // DSN параметры подсоединения к postgresql var DSN = os.Getenv("RGDSN") // Конечная точка API для получения текста материала. См. https://works.rg.ru/project/docs/?section=8 var urlArticle = "https://outer.rg.ru/plain/proxy/?query=https://rg.ru/api/get/object/article-%v.json" // Таймаут запросов к API var requestTimeout = 30 func main() { // считать параметры командной строки // batchSize Количество одновременных запросов к API. // showTiming Показывать времена исполнения batchSize, showTiming := readCommandLineParams() // Порождаем таблицу articles если ее нет createArticlesTable() // Заполняем ее пустыми записями с идентификаторами из таблицы связей rubrics_articles fillArticlesWithIds() // Считаем количество новых записей в articles n := getNewRecordsNumber() fmt.Printf("Количество новых записей в таблице articles = %d.\n", n) // Заполняем таблицу articles текстами из API fillArticlesWithTexts(n, batchSize, showTiming) fmt.Println("DONE") } // readCommandLineParams читает параметры командной строки func readCommandLineParams() (batchSize int, showTiming bool) { flag.IntVar(&batchSize, "batchSize", 50, "Количество запросов выполняемых одновременно") // flag.StringVar(&status, "status", "", "Значение поля migration_status обновляемых записей") flag.BoolVar(&showTiming, "showTiming", false, "Показывать времена исполнения") flag.Parse() // flag.Usage() if batchSize == 0 { os.Exit(0) } return } // Порождает таблицу articles в базе данных func createArticlesTable() { sqlCreateArticles := ` CREATE TABLE IF NOT EXISTS articles ( obj_id text PRIMARY KEY, announce text NULL, authors text NULL, date_modified text NULL, "full-text" text NULL, images text NULL, index_priority text NULL, is_active text NULL, is_announce text NULL, is_paid text NULL, link_title text NULL, links text NULL, obj_kind text NULL, projects text NULL, release_date text NULL, spiegel text NULL, title text NULL, uannounce text NULL, url text NULL, migration_status text NULL, -- DEFAULT ''::text, process_status text NULL, elastic_status text NULL, lemmatized_text text NULL, entities_text text NULL, entities_grouped text NULL ); CREATE INDEX IF NOT EXISTS articles_migration_status__idx ON articles (migration_status); CREATE INDEX IF NOT EXISTS articles_process_status__idx ON articles (process_status); CREATE INDEX IF NOT EXISTS articles_elastic_status__idx ON articles (elastic_status); ` mustExec(sqlCreateArticles) fmt.Println("Таблица articles создана. Вставка новых записей ...") } // Заполняет таблицу articles идентификаторами статей полученными // из таблицы связей rubrics_objects func fillArticlesWithIds() { startTime := time.Now() sqlFillArticlesWithIds := ` INSERT INTO articles(obj_id) SELECT DISTINCT rubrics_objects.object_id FROM rubrics_objects LEFT JOIN articles ON rubrics_objects.object_id = articles.obj_id WHERE articles.obj_id IS NULL AND rubrics_objects.kind = 'article' ON CONFLICT (obj_id) DO NOTHING ; ` mustExec(sqlFillArticlesWithIds) fmt.Printf("Новые записи вставлены в таблицу articles за %v \n", time.Since(startTime)) } // Заполняет таблицу articles текстами из API. // - n - полное количество новых записей. For info only. // - batchSize - Количество одновременных запросов к API. 
// - showTiming - Показывать времена исполнения func fillArticlesWithTexts(n, batchSize int, showTiming bool) { // время отдыха между порциями запросов var sleepTime = 50 * time.Millisecond // Счетчик сделанных запросов counter := 0 //Время начала процесса startTime := time.Now() //Берем первую порцию идентификаторов из таблицы articles ids := getArticleIds(batchSize, showTiming) // Пока в порции в порции есть идентификаторы for len(ids) > 0 { //Запрашиваем тексты статей articleTexts := getAPITextsParallel(ids, showTiming) // преобразовываем тексты в записи - массивы полей материала articleRecords := textsToArticleRecords(articleTexts) // Сохраняем записи в базу данных saveArticlesToDatabase(articleRecords, showTiming) // Выводим сообщение counter += len(ids) duration := time.Since(startTime) durationHours := float64(duration) / float64(time.Hour) articlesPerHour := float64(counter) / durationHours fmt.Printf("Таблица articles. Загружено %8d/%d статей за %14v. Средняя скорость %.0f статей/час. \n", counter, n, duration, articlesPerHour)
} // Получает количество новых записей в таблице articles, // где поле migration_status имеет значение NULL. func getNewRecordsNumber() int { db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]int, 0) err = db.Select(&ids, "SELECT count(obj_id) FROM articles WHERE migration_status IS NULL") checkErr(err) err = db.Close() checkErr(err) return ids[0] } // Получает массив идентификаторов (размером не более limit) статей из базы данных, // в которых поле migration_status имеет значение NULL. func getArticleIds(limit int, showTiming bool) []string { startTime := time.Now() // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) checkErr(err) ids := make([]string, 0) err = db.Select(&ids, fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status IS NULL LIMIT %d", limit)) checkErr(err) // закомментированный код работает тоже в том числе для sqllite3 // rows, err := db.Query(fmt.Sprintf("SELECT obj_id FROM articles WHERE migration_status = '%s' LIMIT %d", status, limit)) // checkErr(err) // var id string // for rows.Next() { // err = rows.Scan(&id) // checkErr(err) // ids = append(ids, id) // } // rows.Close() //good habit to close err = db.Close() checkErr(err) if showTiming { fmt.Printf("Got %v ids in %v. \n", len(ids), time.Since(startTime)) } return ids } // Делает последовательные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITexts(ids []string) [][]string { // startTime := time.Now() articles := make([][]string, 0) for _, id := range ids { articles = append(articles, getOneArticleFromAPI(id)) } // duration := time.Since(startTime) // fmt.Printf("Got %v articles in %v. \n", len(ids), duration) return articles } // Делает параллельные запросы к API возвращая массив пар: // [ [id, text], [id,text],...] func getAPITextsParallel(ids []string, showTiming bool) [][]string { startTime := time.Now() articles := make([][]string, 0) ch := make(chan []string) for _, id := range ids { go func(id string) { ch <- getOneArticleFromAPI(id) }(id) } for range ids { v := <-ch articles = append(articles, v) } close(ch) if showTiming { fmt.Printf("Got %v articles in %v. \n", len(ids), time.Since(startTime)) } return articles } // Возвращает id материала и его текст в виде [id, text] из API func getOneArticleFromAPI(id string) []string { client := http.Client{ Timeout: time.Duration(requestTimeout) * time.Second, } req, err := http.NewRequest("GET", fmt.Sprintf(urlArticle, id), nil) if err != nil { fmt.Println(err) } req.Close = true req.Header.Set("Connection", "close") resp, err := client.Do(req) // resp, err := http.Get(fmt.Sprintf(urlArticle, id)) if err != nil { fmt.Println(err) return []string{id, ""} } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { fmt.Println(err) return []string{id, ""} } s := string(body) return []string{id, s} } // Преобразует массив текстов в массив записей. 
// Запись это отображение: имя_поля -> значение_поля func textsToArticleRecords(texts [][]string) []map[string]interface{} { records := make([]map[string]interface{}, 0) for _, o := range texts { id := o[0] text := o[1] // record := map[string]string{"obj_id": id} var objmap map[string]interface{} //json.RawMessage err := json.Unmarshal([]byte(text), &objmap) if err != nil { fmt.Println(err) objmap = make(map[string]interface{}) objmap["obj_id"] = id objmap["migration_status"] = "error" } else { objmap["migration_status"] = "success" } records = append(records, objmap) } return records } // Сохраняет массив записей в базу данных. // Запись представляет собой map[string]interface{}. func saveArticlesToDatabase(records []map[string]interface{}, showTiming bool) { startTime := time.Now() paramsArray := make([][]interface{}, 0) for _, record := range records { params := make([]interface{}, 0) params = append(params, getMapVal(record, "announce")) params = append(params, getMapVal(record, "authors")) params = append(params, getMapVal(record, "date_modified")) params = append(params, getMapVal(record, "full-text")) params = append(params, getMapVal(record, "images")) params = append(params, getMapVal(record, "index_priority")) params = append(params, getMapVal(record, "is_active")) params = append(params, getMapVal(record, "is_announce")) params = append(params, getMapVal(record, "is_paid")) params = append(params, getMapVal(record, "link_title")) params = append(params, getMapVal(record, "links")) params = append(params, getMapVal(record, "obj_kind")) params = append(params, getMapVal(record, "projects")) params = append(params, getMapVal(record, "release_date")) params = append(params, getMapVal(record, "spiegel")) params = append(params, getMapVal(record, "title")) params = append(params, getMapVal(record, "uannounce")) params = append(params, getMapVal(record, "url")) params = append(params, getMapVal(record, "migration_status")) params = append(params, getMapVal(record, "obj_id")) paramsArray = append(paramsArray, params) } sqlUpdate := ` UPDATE articles SET announce = $1, authors = $2, date_modified = $3, "full-text" = $4, images = $5, index_priority = $6, is_active = $7, is_announce = $8, is_paid = $9, link_title = $10, links = $11, obj_kind = $12, projects = $13, release_date = $14, spiegel = $15, title = $16, uannounce = $17, url = $18, migration_status = $19 WHERE obj_id = $20 ` execMany(sqlUpdate, paramsArray) if showTiming { fmt.Printf("Saved %v articles to database in %v. \n", len(records), time.Since(startTime)) } } // Получает значение поля из отображения. // Возвращает NULL в случае отсутствия поля, // и тестовое представление если поле содержит JSON. func getMapVal(m map[string]interface{}, key string) interface{} { v, ok := m[key] if !ok { return nil } s, ok := v.(string) if ok { return s } b, err := json.Marshal(v) if err == nil { return string(b) } return "something bad" } // Исполняет запрос к базе данных. For all kinds of databases. func exec(sqlText string) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) defer stmt.Close() checkErr(err) _, err = stmt.Exec() checkErr(err) } // Исполняет запрос к базе данных. Specific to postgresql. func mustExec(sqlText string) { db, err := sqlx.Open("postgres", DSN) defer db.Close() if err != nil { log.Fatalln(err) } db.MustExec(sqlText) } // Исполняет несколько параметризованных запросов на обновление или вставку. 
// Если запрос не прошел, печатает сообщение. func execMany(sqlText string, paramsArray [][]interface{}) { // db, err := sql.Open("sqlite3", dbFileName) db, err := sqlx.Open("postgres", DSN) // defer db.Close() checkErr(err) stmt, err := db.Prepare(sqlText) checkErr(err) for _, params := range paramsArray { // fmt.Println("params length================", len(params)) res, err := stmt.Exec(params...) checkErr(err) // Если запрос не затронул ни одну запись, выводим сообщение. affect, err := res.RowsAffected() checkErr(err) if affect == 0 { fmt.Println("Affected->", affect) } } err = stmt.Close() checkErr(err) err = db.Close() checkErr(err) } // Печатаем сообщение об ошибке func checkErr(err error) { if err != nil { fmt.Print(err) } }
// rest between batches time.Sleep(sleepTime) // take the next batch of identifiers ids = getArticleIds(batchSize, showTiming) }
random_line_split
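This record's fillArticlesWithTexts loop fetches a batch of ids, downloads all of their texts in parallel goroutines, saves them, sleeps, and repeats. A rough Python sketch of just the fan-out/fan-in step using the standard library's ThreadPoolExecutor; fetch_one is a placeholder for the real HTTP call, not part of the original:

```python
from concurrent.futures import ThreadPoolExecutor

def fetch_one(article_id):
    # Placeholder for the real HTTP request; returns an (id, text) pair.
    return article_id, "text for %s" % article_id

def fetch_batch(ids, max_workers=50):
    """Fetch all ids concurrently and return [(id, text), ...]."""
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(fetch_one, ids))

if __name__ == "__main__":
    print(fetch_batch(["article-1", "article-2", "article-3"]))
```

A thread pool plays the role of the goroutine-per-id pattern here; for a batch of ~50 I/O-bound requests the two approaches behave similarly.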
segment_all_data.py
#!/usr/bin/python from bisect import * from copy import * import sys import json import numpy from pylab import * key_time = 'timestamp' key_value = 'value' key_counts = 'counts' key_energies = 'energies' key_lightvar = 'lightvariance' key_id = 'id' key_survey = 'survey' key_interval = 'interval' key_label = 'label' k_sensor_keys = [key_lightvar, key_counts, key_energies] k_conversion_factor = (1.0 / 60.0) # to minutes from seconds k_interval = 15.0 # minutes k_segment_split_duaration = 60.0 * 3.0 #minutes k_max_segment_length_in_intervals = 14*60/k_interval #intervals k_min_segment_length_in_intervals = 5*60/k_interval #intervals k_num_zeros_to_prepend = 6 k_num_zeros_to_append = 6 #hour 0-23, mode 0 - not sleepy times #mode 1 - possibly sleepy times #mode 2 - definitely sleep times k_hour_mode_lookup = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] def filter_bad_values(events): events2 = [item for item in events if item[key_value] > 0] return events2 ''' take list of events, find gaps of time in events, and use those gaps to split the events into segments of events where there is activity ''' def segment(dict_of_lists): segments = [] for key in dict_of_lists: pilllists = dict_of_lists[key]['pill'] senselist = dict_of_lists[key]['sense'] timelist = pilllists[0] valuelist = pilllists[1] sensetimes = senselist[0] temperatures = senselist[1] humidities = senselist[2] lights = senselist[3] t1_list = [] t2_list = [] if len(timelist) == 0: continue seg_t1 = timelist[0] t1 = seg_t1 last_t2 = t1 is_one_segment_found = False for t in timelist: t2 = t dt = float(t2-t1)*k_conversion_factor if dt > k_segment_split_duaration: seg_t2 = last_t1 t1_list.append(seg_t1) t2_list.append(seg_t2) seg_t1 = t2 is_one_segment_found = True last_t1 = t1 t1 = t2 if not is_one_segment_found: seg_t2 = last_t1 dt = seg_t2 - seg_t1 t1_list.append(seg_t1) t2_list.append(seg_t2) seg_t1 = t2 for i in range(len(t1_list)): segment_dict = {} t1 = t1_list[i] t2 = t2_list[i] i1 = bisect(timelist, t1) i2 = bisect(timelist, t2) + 1 segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2]) j1 = bisect(sensetimes, t1) j2 = bisect(sensetimes, t2) + 1 segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2]) segment_dict[key_id] = key segments.append(segment_dict) return segments ''' build series of observation data at fixed intervals for training HMMs ''' def compute_log_variance(x, logbase = 2.0, offset=1.0): return numpy.log(numpy.var(x) + offset) / numpy.log(logbase) def compute_log_range(x, logbase = 2.0, maxval=10.): imin = numpy.argmin(x) imax = numpy.argmax(x) min = x[imin] max = x[imax] #if the max happened later than the min, then this was an increase #we only are looking at lights out if imax > imin: range = 0 else: range = max - min fracchange = range / (min + 20) fracchange = fracchange - 0.25 if fracchange < 0: fracchange = 0 val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase)) if val > maxval: val = maxval return val def summarize(segments, interval_in_minutes): if segments is None or len(segments) == 0: return None summary = [] for segment in segments: times = segment['pill'][0] values = segment['pill'][1] id = segment[key_id] sensetimes = segment['sense'][0] humidities = segment['sense'][1] temperatures = segment['sense'][2] lights = segment['sense'][3] if times is None or len(times) == 0: continue t0 = times[0] tf = times[-1] #get time in minutes from first times = [(t - t0) * k_conversion_factor for t in times ] sensetimes = 
[(t - t0) * k_conversion_factor for t in sensetimes] #get index of each time point indices = [int(t / interval_in_minutes) for t in times] indices2 = [int(t / interval_in_minutes) for t in sensetimes] if len(indices2) == 0 or len(indices) == 0: continue if indices2[-1] > indices[-1]: maxidx = indices2[-1] else: maxidx = indices[-1] mycounts = [] myenergies = [] mylight = [] mytimeofday = [] #create counts and energies arrays for i in xrange(maxidx+1): mycounts.append(0) myenergies.append(0) mylight.append(0) mytimeofday.append(0) #SUMMARIZE PILL DATA for i in xrange(len(indices)): idx = indices[i] mycounts[idx] = mycounts[idx] + 1 myenergies[idx] = myenergies[idx] + values[i] for i in range(len(myenergies)): #transform energy output to to a quantized log value logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) )) myenergies[i] = logval for i in range(len(mycounts)): logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) )) mycounts[i] = logval for i in range(len(mytimeofday)): tt = t0 + interval_in_minutes*i*60 mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour] #SUMMARIZE SENSE DATA for idx in xrange(maxidx+1): indices = [i for i in xrange(len(indices2)) if indices2[i] == idx] lightvals = numpy.array(map(lights.__getitem__, indices)) if len(lightvals) == 0: lightvals = numpy.array([0]) y = int(compute_log_range(lightvals, 3, 1.)) mylight[idx] = y
''' remove segments that are too long or too short those that are in the acceptable range, pad with zeros to fill out the max length ''' def enforce_summary_limits(summary, min_length, max_length): summary2 = [] if summary is None: print 'got a nonexistant summary. wat?' return None for item in summary: counts = item[key_counts] #reject if len(counts) < min_length: #print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length) continue #reject if len(counts) > max_length: #print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length) continue summary2.append(deepcopy(item)) return summary2 def prepend_zeros(summary, numzeros, numzeros2): for item in summary: for key in item: if key in k_sensor_keys: thisvector = item[key] item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, )))) def vectorize_measurements(summary): meas = [] info = [] for item in summary: e = item[key_energies] c = item[key_counts] l = item[key_lightvar] id = item[key_id] interval = item[key_interval] label = None if item.has_key(key_label): label = item[key_label] if len(e) != len(c): print ("somehow, energies and counts are not the same length.") continue arr = numpy.array([e, c, l]) meas.append(arr) info.append((id,interval,label)) return meas, info def get_labels(summary, dict_of_lists): for id in dict_of_lists: if not dict_of_lists[id].has_key(key_survey): continue survey = dict_of_lists[id][key_survey] #assume it's all sorted matching_summaries = [s for s in summary if s[key_id] == id] if len(matching_summaries) == 0: continue label_idx = 0 summary_idx = 0 N = len(survey[0]) while(summary_idx < len(matching_summaries) and label_idx < N): s = matching_summaries[summary_idx] t0_1 = s[key_interval][0] tf_1 = s[key_interval][1] t0_2 = survey[0][label_idx] tf_2 = survey[2][label_idx] oof = t0_1 #print t0_1 - oof, (tf_1 -oof)/3600.0, (t0_2 - oof) / 3600.0, (tf_2 -oof)/3600.0 #end of segment 1 less than beginning of segment 2? move up segment 1 if tf_1 < t0_2: #print 'continue 1' summary_idx = summary_idx + 1 continue #beginning of segment 1 greater than end of segment2? move up segment 2 if t0_1 > tf_2: #print 'idx++' label_idx = label_idx + 1 continue #neither end of seg1 < begin seg2 nor # end of seg2 < begin of seg1 # overlap! if t0_1 > t0_2: tt1 = t0_1 else: tt1 = t0_2 if tf_1 < tf_2: tt2 = tf_1 else: tt2 = tf_2 dt = tt2 - tt1 print 'dt overlap (hrs):', dt / 3600.0 s[key_label] = [dt, survey[0][label_idx], survey[1][label_idx], survey[2][label_idx]] summary_idx += 1 #label_idx += 1 #for d in #if dict_of_lists.has_key('') foo = 3 def process(dict_of_lists): segments = segment(dict_of_lists) summary = summarize(segments,k_interval) get_labels(summary, dict_of_lists) summary2 = enforce_summary_limits(summary, k_min_segment_length_in_intervals, k_max_segment_length_in_intervals) prepend_zeros(summary2, k_num_zeros_to_prepend, k_num_zeros_to_append) meas, info = vectorize_measurements(summary2) return meas if __name__ == '__main__': f = open(sys.argv[1]) data = json.load(f) f.close() meas = process(data) f = open(sys.argv[1] + '.meas', 'w') json.dump(summary2, f) f.close()
summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)}) return summary
random_line_split
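summarize in the record above converts each event timestamp into a fixed-width interval index and accumulates per-interval counts and summed values. A small, self-contained sketch of that binning step (the 15-minute interval matches the source's k_interval; the sample data is invented):

```python
def bin_events(times, values, interval_minutes=15.0):
    """Accumulate event counts and summed values into fixed-width time bins."""
    t0 = times[0]
    minutes = [(t - t0) / 60.0 for t in times]            # seconds -> minutes
    indices = [int(m / interval_minutes) for m in minutes]
    counts = [0] * (indices[-1] + 1)
    energies = [0.0] * (indices[-1] + 1)
    for idx, value in zip(indices, values):
        counts[idx] += 1
        energies[idx] += value
    return counts, energies

if __name__ == "__main__":
    times = [0, 60, 1200, 3600]    # seconds, sorted
    values = [1.0, 2.0, 0.5, 4.0]
    print(bin_events(times, values))
```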
segment_all_data.py
#!/usr/bin/python from bisect import * from copy import * import sys import json import numpy from pylab import * key_time = 'timestamp' key_value = 'value' key_counts = 'counts' key_energies = 'energies' key_lightvar = 'lightvariance' key_id = 'id' key_survey = 'survey' key_interval = 'interval' key_label = 'label' k_sensor_keys = [key_lightvar, key_counts, key_energies] k_conversion_factor = (1.0 / 60.0) # to minutes from seconds k_interval = 15.0 # minutes k_segment_split_duaration = 60.0 * 3.0 #minutes k_max_segment_length_in_intervals = 14*60/k_interval #intervals k_min_segment_length_in_intervals = 5*60/k_interval #intervals k_num_zeros_to_prepend = 6 k_num_zeros_to_append = 6 #hour 0-23, mode 0 - not sleepy times #mode 1 - possibly sleepy times #mode 2 - definitely sleep times k_hour_mode_lookup = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] def filter_bad_values(events): events2 = [item for item in events if item[key_value] > 0] return events2 ''' take list of events, find gaps of time in events, and use those gaps to split the events into segments of events where there is activity ''' def segment(dict_of_lists): segments = [] for key in dict_of_lists: pilllists = dict_of_lists[key]['pill'] senselist = dict_of_lists[key]['sense'] timelist = pilllists[0] valuelist = pilllists[1] sensetimes = senselist[0] temperatures = senselist[1] humidities = senselist[2] lights = senselist[3] t1_list = [] t2_list = [] if len(timelist) == 0: continue seg_t1 = timelist[0] t1 = seg_t1 last_t2 = t1 is_one_segment_found = False for t in timelist: t2 = t dt = float(t2-t1)*k_conversion_factor if dt > k_segment_split_duaration: seg_t2 = last_t1 t1_list.append(seg_t1) t2_list.append(seg_t2) seg_t1 = t2 is_one_segment_found = True last_t1 = t1 t1 = t2 if not is_one_segment_found: seg_t2 = last_t1 dt = seg_t2 - seg_t1 t1_list.append(seg_t1) t2_list.append(seg_t2) seg_t1 = t2 for i in range(len(t1_list)): segment_dict = {} t1 = t1_list[i] t2 = t2_list[i] i1 = bisect(timelist, t1) i2 = bisect(timelist, t2) + 1 segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2]) j1 = bisect(sensetimes, t1) j2 = bisect(sensetimes, t2) + 1 segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2]) segment_dict[key_id] = key segments.append(segment_dict) return segments ''' build series of observation data at fixed intervals for training HMMs ''' def compute_log_variance(x, logbase = 2.0, offset=1.0): return numpy.log(numpy.var(x) + offset) / numpy.log(logbase) def compute_log_range(x, logbase = 2.0, maxval=10.): imin = numpy.argmin(x) imax = numpy.argmax(x) min = x[imin] max = x[imax] #if the max happened later than the min, then this was an increase #we only are looking at lights out if imax > imin: range = 0 else: range = max - min fracchange = range / (min + 20) fracchange = fracchange - 0.25 if fracchange < 0: fracchange = 0 val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase)) if val > maxval: val = maxval return val def summarize(segments, interval_in_minutes): if segments is None or len(segments) == 0: return None summary = [] for segment in segments: times = segment['pill'][0] values = segment['pill'][1] id = segment[key_id] sensetimes = segment['sense'][0] humidities = segment['sense'][1] temperatures = segment['sense'][2] lights = segment['sense'][3] if times is None or len(times) == 0: continue t0 = times[0] tf = times[-1] #get time in minutes from first times = [(t - t0) * k_conversion_factor for t in times ] sensetimes = 
[(t - t0) * k_conversion_factor for t in sensetimes] #get index of each time point indices = [int(t / interval_in_minutes) for t in times] indices2 = [int(t / interval_in_minutes) for t in sensetimes] if len(indices2) == 0 or len(indices) == 0: continue if indices2[-1] > indices[-1]: maxidx = indices2[-1] else: maxidx = indices[-1] mycounts = [] myenergies = [] mylight = [] mytimeofday = [] #create counts and energies arrays for i in xrange(maxidx+1): mycounts.append(0) myenergies.append(0) mylight.append(0) mytimeofday.append(0) #SUMMARIZE PILL DATA for i in xrange(len(indices)): idx = indices[i] mycounts[idx] = mycounts[idx] + 1 myenergies[idx] = myenergies[idx] + values[i] for i in range(len(myenergies)): #transform energy output to to a quantized log value logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) )) myenergies[i] = logval for i in range(len(mycounts)): logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) )) mycounts[i] = logval for i in range(len(mytimeofday)): tt = t0 + interval_in_minutes*i*60 mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour] #SUMMARIZE SENSE DATA for idx in xrange(maxidx+1): indices = [i for i in xrange(len(indices2)) if indices2[i] == idx] lightvals = numpy.array(map(lights.__getitem__, indices)) if len(lightvals) == 0: lightvals = numpy.array([0]) y = int(compute_log_range(lightvals, 3, 1.)) mylight[idx] = y summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)}) return summary ''' remove segments that are too long or too short those that are in the acceptable range, pad with zeros to fill out the max length ''' def enforce_summary_limits(summary, min_length, max_length): summary2 = [] if summary is None: print 'got a nonexistant summary. wat?' return None for item in summary: counts = item[key_counts] #reject if len(counts) < min_length: #print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length) continue #reject if len(counts) > max_length: #print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length) continue summary2.append(deepcopy(item)) return summary2 def prepend_zeros(summary, numzeros, numzeros2):
def vectorize_measurements(summary): meas = [] info = [] for item in summary: e = item[key_energies] c = item[key_counts] l = item[key_lightvar] id = item[key_id] interval = item[key_interval] label = None if item.has_key(key_label): label = item[key_label] if len(e) != len(c): print ("somehow, energies and counts are not the same length.") continue arr = numpy.array([e, c, l]) meas.append(arr) info.append((id,interval,label)) return meas, info def get_labels(summary, dict_of_lists): for id in dict_of_lists: if not dict_of_lists[id].has_key(key_survey): continue survey = dict_of_lists[id][key_survey] #assume it's all sorted matching_summaries = [s for s in summary if s[key_id] == id] if len(matching_summaries) == 0: continue label_idx = 0 summary_idx = 0 N = len(survey[0]) while(summary_idx < len(matching_summaries) and label_idx < N): s = matching_summaries[summary_idx] t0_1 = s[key_interval][0] tf_1 = s[key_interval][1] t0_2 = survey[0][label_idx] tf_2 = survey[2][label_idx] oof = t0_1 #print t0_1 - oof, (tf_1 -oof)/3600.0, (t0_2 - oof) / 3600.0, (tf_2 -oof)/3600.0 #end of segment 1 less than beginning of segment 2? move up segment 1 if tf_1 < t0_2: #print 'continue 1' summary_idx = summary_idx + 1 continue #beginning of segment 1 greater than end of segment2? move up segment 2 if t0_1 > tf_2: #print 'idx++' label_idx = label_idx + 1 continue #neither end of seg1 < begin seg2 nor # end of seg2 < begin of seg1 # overlap! if t0_1 > t0_2: tt1 = t0_1 else: tt1 = t0_2 if tf_1 < tf_2: tt2 = tf_1 else: tt2 = tf_2 dt = tt2 - tt1 print 'dt overlap (hrs):', dt / 3600.0 s[key_label] = [dt, survey[0][label_idx], survey[1][label_idx], survey[2][label_idx]] summary_idx += 1 #label_idx += 1 #for d in #if dict_of_lists.has_key('') foo = 3 def process(dict_of_lists): segments = segment(dict_of_lists) summary = summarize(segments,k_interval) get_labels(summary, dict_of_lists) summary2 = enforce_summary_limits(summary, k_min_segment_length_in_intervals, k_max_segment_length_in_intervals) prepend_zeros(summary2, k_num_zeros_to_prepend, k_num_zeros_to_append) meas, info = vectorize_measurements(summary2) return meas if __name__ == '__main__': f = open(sys.argv[1]) data = json.load(f) f.close() meas = process(data) f = open(sys.argv[1] + '.meas', 'w') json.dump(summary2, f) f.close()
for item in summary:
    for key in item:
        if key in k_sensor_keys:
            thisvector = item[key]
            item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, ))))
identifier_body
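The identifier_body middle above is prepend_zeros, which pads each sensor vector with leading and trailing zeros before training. A compact sketch of the same padding with numpy; the key names and pad lengths mirror the source's constants, but the item dict is invented:

```python
import numpy as np

SENSOR_KEYS = ("counts", "energies", "lightvariance")

def pad_item(item, n_before=6, n_after=6):
    """Return a copy of item with each sensor vector zero-padded on both ends."""
    padded = dict(item)
    for key in SENSOR_KEYS:
        padded[key] = np.concatenate(
            (np.zeros(n_before), np.asarray(item[key], dtype=float), np.zeros(n_after)))
    return padded

if __name__ == "__main__":
    item = {"counts": [1, 2], "energies": [3.0, 4.0], "lightvariance": [0, 1], "id": "x"}
    print(pad_item(item)["counts"])   # 6 zeros, the data, 6 zeros
```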
segment_all_data.py
#!/usr/bin/python from bisect import * from copy import * import sys import json import numpy from pylab import * key_time = 'timestamp' key_value = 'value' key_counts = 'counts' key_energies = 'energies' key_lightvar = 'lightvariance' key_id = 'id' key_survey = 'survey' key_interval = 'interval' key_label = 'label' k_sensor_keys = [key_lightvar, key_counts, key_energies] k_conversion_factor = (1.0 / 60.0) # to minutes from seconds k_interval = 15.0 # minutes k_segment_split_duaration = 60.0 * 3.0 #minutes k_max_segment_length_in_intervals = 14*60/k_interval #intervals k_min_segment_length_in_intervals = 5*60/k_interval #intervals k_num_zeros_to_prepend = 6 k_num_zeros_to_append = 6 #hour 0-23, mode 0 - not sleepy times #mode 1 - possibly sleepy times #mode 2 - definitely sleep times k_hour_mode_lookup = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] def filter_bad_values(events): events2 = [item for item in events if item[key_value] > 0] return events2 ''' take list of events, find gaps of time in events, and use those gaps to split the events into segments of events where there is activity ''' def segment(dict_of_lists): segments = [] for key in dict_of_lists: pilllists = dict_of_lists[key]['pill'] senselist = dict_of_lists[key]['sense'] timelist = pilllists[0] valuelist = pilllists[1] sensetimes = senselist[0] temperatures = senselist[1] humidities = senselist[2] lights = senselist[3] t1_list = [] t2_list = [] if len(timelist) == 0: continue seg_t1 = timelist[0] t1 = seg_t1 last_t2 = t1 is_one_segment_found = False for t in timelist: t2 = t dt = float(t2-t1)*k_conversion_factor if dt > k_segment_split_duaration: seg_t2 = last_t1 t1_list.append(seg_t1) t2_list.append(seg_t2) seg_t1 = t2 is_one_segment_found = True last_t1 = t1 t1 = t2 if not is_one_segment_found: seg_t2 = last_t1 dt = seg_t2 - seg_t1 t1_list.append(seg_t1) t2_list.append(seg_t2) seg_t1 = t2 for i in range(len(t1_list)): segment_dict = {} t1 = t1_list[i] t2 = t2_list[i] i1 = bisect(timelist, t1) i2 = bisect(timelist, t2) + 1 segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2]) j1 = bisect(sensetimes, t1) j2 = bisect(sensetimes, t2) + 1 segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2]) segment_dict[key_id] = key segments.append(segment_dict) return segments ''' build series of observation data at fixed intervals for training HMMs ''' def
(x, logbase = 2.0, offset=1.0): return numpy.log(numpy.var(x) + offset) / numpy.log(logbase) def compute_log_range(x, logbase = 2.0, maxval=10.): imin = numpy.argmin(x) imax = numpy.argmax(x) min = x[imin] max = x[imax] #if the max happened later than the min, then this was an increase #we only are looking at lights out if imax > imin: range = 0 else: range = max - min fracchange = range / (min + 20) fracchange = fracchange - 0.25 if fracchange < 0: fracchange = 0 val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase)) if val > maxval: val = maxval return val def summarize(segments, interval_in_minutes): if segments is None or len(segments) == 0: return None summary = [] for segment in segments: times = segment['pill'][0] values = segment['pill'][1] id = segment[key_id] sensetimes = segment['sense'][0] humidities = segment['sense'][1] temperatures = segment['sense'][2] lights = segment['sense'][3] if times is None or len(times) == 0: continue t0 = times[0] tf = times[-1] #get time in minutes from first times = [(t - t0) * k_conversion_factor for t in times ] sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes] #get index of each time point indices = [int(t / interval_in_minutes) for t in times] indices2 = [int(t / interval_in_minutes) for t in sensetimes] if len(indices2) == 0 or len(indices) == 0: continue if indices2[-1] > indices[-1]: maxidx = indices2[-1] else: maxidx = indices[-1] mycounts = [] myenergies = [] mylight = [] mytimeofday = [] #create counts and energies arrays for i in xrange(maxidx+1): mycounts.append(0) myenergies.append(0) mylight.append(0) mytimeofday.append(0) #SUMMARIZE PILL DATA for i in xrange(len(indices)): idx = indices[i] mycounts[idx] = mycounts[idx] + 1 myenergies[idx] = myenergies[idx] + values[i] for i in range(len(myenergies)): #transform energy output to to a quantized log value logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) )) myenergies[i] = logval for i in range(len(mycounts)): logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) )) mycounts[i] = logval for i in range(len(mytimeofday)): tt = t0 + interval_in_minutes*i*60 mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour] #SUMMARIZE SENSE DATA for idx in xrange(maxidx+1): indices = [i for i in xrange(len(indices2)) if indices2[i] == idx] lightvals = numpy.array(map(lights.__getitem__, indices)) if len(lightvals) == 0: lightvals = numpy.array([0]) y = int(compute_log_range(lightvals, 3, 1.)) mylight[idx] = y summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)}) return summary ''' remove segments that are too long or too short those that are in the acceptable range, pad with zeros to fill out the max length ''' def enforce_summary_limits(summary, min_length, max_length): summary2 = [] if summary is None: print 'got a nonexistant summary. wat?' 
return None for item in summary: counts = item[key_counts] #reject if len(counts) < min_length: #print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length) continue #reject if len(counts) > max_length: #print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length) continue summary2.append(deepcopy(item)) return summary2 def prepend_zeros(summary, numzeros, numzeros2): for item in summary: for key in item: if key in k_sensor_keys: thisvector = item[key] item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, )))) def vectorize_measurements(summary): meas = [] info = [] for item in summary: e = item[key_energies] c = item[key_counts] l = item[key_lightvar] id = item[key_id] interval = item[key_interval] label = None if item.has_key(key_label): label = item[key_label] if len(e) != len(c): print ("somehow, energies and counts are not the same length.") continue arr = numpy.array([e, c, l]) meas.append(arr) info.append((id,interval,label)) return meas, info def get_labels(summary, dict_of_lists): for id in dict_of_lists: if not dict_of_lists[id].has_key(key_survey): continue survey = dict_of_lists[id][key_survey] #assume it's all sorted matching_summaries = [s for s in summary if s[key_id] == id] if len(matching_summaries) == 0: continue label_idx = 0 summary_idx = 0 N = len(survey[0]) while(summary_idx < len(matching_summaries) and label_idx < N): s = matching_summaries[summary_idx] t0_1 = s[key_interval][0] tf_1 = s[key_interval][1] t0_2 = survey[0][label_idx] tf_2 = survey[2][label_idx] oof = t0_1 #print t0_1 - oof, (tf_1 -oof)/3600.0, (t0_2 - oof) / 3600.0, (tf_2 -oof)/3600.0 #end of segment 1 less than beginning of segment 2? move up segment 1 if tf_1 < t0_2: #print 'continue 1' summary_idx = summary_idx + 1 continue #beginning of segment 1 greater than end of segment2? move up segment 2 if t0_1 > tf_2: #print 'idx++' label_idx = label_idx + 1 continue #neither end of seg1 < begin seg2 nor # end of seg2 < begin of seg1 # overlap! if t0_1 > t0_2: tt1 = t0_1 else: tt1 = t0_2 if tf_1 < tf_2: tt2 = tf_1 else: tt2 = tf_2 dt = tt2 - tt1 print 'dt overlap (hrs):', dt / 3600.0 s[key_label] = [dt, survey[0][label_idx], survey[1][label_idx], survey[2][label_idx]] summary_idx += 1 #label_idx += 1 #for d in #if dict_of_lists.has_key('') foo = 3 def process(dict_of_lists): segments = segment(dict_of_lists) summary = summarize(segments,k_interval) get_labels(summary, dict_of_lists) summary2 = enforce_summary_limits(summary, k_min_segment_length_in_intervals, k_max_segment_length_in_intervals) prepend_zeros(summary2, k_num_zeros_to_prepend, k_num_zeros_to_append) meas, info = vectorize_measurements(summary2) return meas if __name__ == '__main__': f = open(sys.argv[1]) data = json.load(f) f.close() meas = process(data) f = open(sys.argv[1] + '.meas', 'w') json.dump(summary2, f) f.close()
compute_log_variance
identifier_name
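The masked identifier in this record is compute_log_variance, which maps the variance of a window onto a log scale so large spikes compress into small scores. A self-contained version of that transform, using the same numpy calls as the source:

```python
import numpy as np

def compute_log_variance(x, logbase=2.0, offset=1.0):
    """Return log_base(var(x) + offset), a coarse, outlier-tolerant spread measure."""
    return np.log(np.var(x) + offset) / np.log(logbase)

if __name__ == "__main__":
    print(compute_log_variance([0, 0, 0]))      # no spread -> 0.0
    print(compute_log_variance([0, 8, 0, 8]))   # larger spread -> larger value
```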
segment_all_data.py
#!/usr/bin/python from bisect import * from copy import * import sys import json import numpy from pylab import * key_time = 'timestamp' key_value = 'value' key_counts = 'counts' key_energies = 'energies' key_lightvar = 'lightvariance' key_id = 'id' key_survey = 'survey' key_interval = 'interval' key_label = 'label' k_sensor_keys = [key_lightvar, key_counts, key_energies] k_conversion_factor = (1.0 / 60.0) # to minutes from seconds k_interval = 15.0 # minutes k_segment_split_duaration = 60.0 * 3.0 #minutes k_max_segment_length_in_intervals = 14*60/k_interval #intervals k_min_segment_length_in_intervals = 5*60/k_interval #intervals k_num_zeros_to_prepend = 6 k_num_zeros_to_append = 6 #hour 0-23, mode 0 - not sleepy times #mode 1 - possibly sleepy times #mode 2 - definitely sleep times k_hour_mode_lookup = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] def filter_bad_values(events): events2 = [item for item in events if item[key_value] > 0] return events2 ''' take list of events, find gaps of time in events, and use those gaps to split the events into segments of events where there is activity ''' def segment(dict_of_lists): segments = [] for key in dict_of_lists:
return segments ''' build series of observation data at fixed intervals for training HMMs ''' def compute_log_variance(x, logbase = 2.0, offset=1.0): return numpy.log(numpy.var(x) + offset) / numpy.log(logbase) def compute_log_range(x, logbase = 2.0, maxval=10.): imin = numpy.argmin(x) imax = numpy.argmax(x) min = x[imin] max = x[imax] #if the max happened later than the min, then this was an increase #we only are looking at lights out if imax > imin: range = 0 else: range = max - min fracchange = range / (min + 20) fracchange = fracchange - 0.25 if fracchange < 0: fracchange = 0 val = numpy.ceil(numpy.log(fracchange + 1) / numpy.log(logbase)) if val > maxval: val = maxval return val def summarize(segments, interval_in_minutes): if segments is None or len(segments) == 0: return None summary = [] for segment in segments: times = segment['pill'][0] values = segment['pill'][1] id = segment[key_id] sensetimes = segment['sense'][0] humidities = segment['sense'][1] temperatures = segment['sense'][2] lights = segment['sense'][3] if times is None or len(times) == 0: continue t0 = times[0] tf = times[-1] #get time in minutes from first times = [(t - t0) * k_conversion_factor for t in times ] sensetimes = [(t - t0) * k_conversion_factor for t in sensetimes] #get index of each time point indices = [int(t / interval_in_minutes) for t in times] indices2 = [int(t / interval_in_minutes) for t in sensetimes] if len(indices2) == 0 or len(indices) == 0: continue if indices2[-1] > indices[-1]: maxidx = indices2[-1] else: maxidx = indices[-1] mycounts = [] myenergies = [] mylight = [] mytimeofday = [] #create counts and energies arrays for i in xrange(maxidx+1): mycounts.append(0) myenergies.append(0) mylight.append(0) mytimeofday.append(0) #SUMMARIZE PILL DATA for i in xrange(len(indices)): idx = indices[i] mycounts[idx] = mycounts[idx] + 1 myenergies[idx] = myenergies[idx] + values[i] for i in range(len(myenergies)): #transform energy output to to a quantized log value logval = int(numpy.ceil(numpy.log10(myenergies[i] + 1.0) )) myenergies[i] = logval for i in range(len(mycounts)): logval = int(numpy.ceil(numpy.log(mycounts[i] + 1.0)/numpy.log(2.0) )) mycounts[i] = logval for i in range(len(mytimeofday)): tt = t0 + interval_in_minutes*i*60 mytimeofday[i] = k_hour_mode_lookup[datetime.datetime.fromtimestamp(tt).hour] #SUMMARIZE SENSE DATA for idx in xrange(maxidx+1): indices = [i for i in xrange(len(indices2)) if indices2[i] == idx] lightvals = numpy.array(map(lights.__getitem__, indices)) if len(lightvals) == 0: lightvals = numpy.array([0]) y = int(compute_log_range(lightvals, 3, 1.)) mylight[idx] = y summary.append({key_counts : mycounts, key_energies : myenergies, key_lightvar : mylight, key_id : id, key_interval : (t0, tf)}) return summary ''' remove segments that are too long or too short those that are in the acceptable range, pad with zeros to fill out the max length ''' def enforce_summary_limits(summary, min_length, max_length): summary2 = [] if summary is None: print 'got a nonexistant summary. wat?' 
return None for item in summary: counts = item[key_counts] #reject if len(counts) < min_length: #print "rejecting %d length item, which was less than %d counts" % (len(counts), min_length) continue #reject if len(counts) > max_length: #print "rejecting %d length item, which was greater than %d counts" % (len(counts), max_length) continue summary2.append(deepcopy(item)) return summary2 def prepend_zeros(summary, numzeros, numzeros2): for item in summary: for key in item: if key in k_sensor_keys: thisvector = item[key] item[key] = numpy.concatenate((numpy.zeros((numzeros, )), thisvector, numpy.zeros((numzeros2, )))) def vectorize_measurements(summary): meas = [] info = [] for item in summary: e = item[key_energies] c = item[key_counts] l = item[key_lightvar] id = item[key_id] interval = item[key_interval] label = None if item.has_key(key_label): label = item[key_label] if len(e) != len(c): print ("somehow, energies and counts are not the same length.") continue arr = numpy.array([e, c, l]) meas.append(arr) info.append((id,interval,label)) return meas, info def get_labels(summary, dict_of_lists): for id in dict_of_lists: if not dict_of_lists[id].has_key(key_survey): continue survey = dict_of_lists[id][key_survey] #assume it's all sorted matching_summaries = [s for s in summary if s[key_id] == id] if len(matching_summaries) == 0: continue label_idx = 0 summary_idx = 0 N = len(survey[0]) while(summary_idx < len(matching_summaries) and label_idx < N): s = matching_summaries[summary_idx] t0_1 = s[key_interval][0] tf_1 = s[key_interval][1] t0_2 = survey[0][label_idx] tf_2 = survey[2][label_idx] oof = t0_1 #print t0_1 - oof, (tf_1 -oof)/3600.0, (t0_2 - oof) / 3600.0, (tf_2 -oof)/3600.0 #end of segment 1 less than beginning of segment 2? move up segment 1 if tf_1 < t0_2: #print 'continue 1' summary_idx = summary_idx + 1 continue #beginning of segment 1 greater than end of segment2? move up segment 2 if t0_1 > tf_2: #print 'idx++' label_idx = label_idx + 1 continue #neither end of seg1 < begin seg2 nor # end of seg2 < begin of seg1 # overlap! if t0_1 > t0_2: tt1 = t0_1 else: tt1 = t0_2 if tf_1 < tf_2: tt2 = tf_1 else: tt2 = tf_2 dt = tt2 - tt1 print 'dt overlap (hrs):', dt / 3600.0 s[key_label] = [dt, survey[0][label_idx], survey[1][label_idx], survey[2][label_idx]] summary_idx += 1 #label_idx += 1 #for d in #if dict_of_lists.has_key('') foo = 3 def process(dict_of_lists): segments = segment(dict_of_lists) summary = summarize(segments,k_interval) get_labels(summary, dict_of_lists) summary2 = enforce_summary_limits(summary, k_min_segment_length_in_intervals, k_max_segment_length_in_intervals) prepend_zeros(summary2, k_num_zeros_to_prepend, k_num_zeros_to_append) meas, info = vectorize_measurements(summary2) return meas if __name__ == '__main__': f = open(sys.argv[1]) data = json.load(f) f.close() meas = process(data) f = open(sys.argv[1] + '.meas', 'w') json.dump(summary2, f) f.close()
pilllists = dict_of_lists[key]['pill']
senselist = dict_of_lists[key]['sense']
timelist = pilllists[0]
valuelist = pilllists[1]
sensetimes = senselist[0]
temperatures = senselist[1]
humidities = senselist[2]
lights = senselist[3]
t1_list = []
t2_list = []
if len(timelist) == 0:
    continue
seg_t1 = timelist[0]
t1 = seg_t1
last_t2 = t1
is_one_segment_found = False
for t in timelist:
    t2 = t
    dt = float(t2-t1)*k_conversion_factor
    if dt > k_segment_split_duaration:
        seg_t2 = last_t1
        t1_list.append(seg_t1)
        t2_list.append(seg_t2)
        seg_t1 = t2
        is_one_segment_found = True
    last_t1 = t1
    t1 = t2
if not is_one_segment_found:
    seg_t2 = last_t1
    dt = seg_t2 - seg_t1
    t1_list.append(seg_t1)
    t2_list.append(seg_t2)
    seg_t1 = t2
for i in range(len(t1_list)):
    segment_dict = {}
    t1 = t1_list[i]
    t2 = t2_list[i]
    i1 = bisect(timelist, t1)
    i2 = bisect(timelist, t2) + 1
    segment_dict['pill'] = (timelist[i1:i2], valuelist[i1:i2])
    j1 = bisect(sensetimes, t1)
    j2 = bisect(sensetimes, t2) + 1
    segment_dict['sense'] = (sensetimes[j1:j2],temperatures[j1:j2],humidities[j1:j2],lights[j1:j2])
    segment_dict[key_id] = key
    segments.append(segment_dict)
conditional_block
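The conditional_block above walks a sorted timestamp list, closes a segment whenever the gap between consecutive events exceeds a threshold, then uses bisect to cut the matching window out of parallel series. A simplified sketch of both steps; the threshold and data are invented, and the boundary handling is tightened to an exact closed interval rather than the source's extra trailing element:

```python
from bisect import bisect_left, bisect_right

def split_on_gaps(times, gap_minutes=180.0):
    """Split a sorted list of timestamps (seconds) into (start, end) spans at large gaps."""
    spans, start = [], times[0]
    for prev, cur in zip(times, times[1:]):
        if (cur - prev) / 60.0 > gap_minutes:
            spans.append((start, prev))
            start = cur
    spans.append((start, times[-1]))
    return spans

def slice_parallel(times, values, start, end):
    """Cut the [start, end] window out of a parallel series via binary search."""
    i1, i2 = bisect_left(times, start), bisect_right(times, end)
    return times[i1:i2], values[i1:i2]

if __name__ == "__main__":
    times = [0, 60, 120, 50000, 50060]
    values = ["a", "b", "c", "d", "e"]
    for start, end in split_on_gaps(times):
        print(slice_parallel(times, values, start, end))
```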
listparser.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import requests from bs4 import BeautifulSoup import codecs from time import sleep import re levels = {u'федеральный': 'federal', u'региональный': 'subject', u'административный центр': 'local', u'местное самоуправление': 'local'} PATTERNS_LINKS_TO_RESULT_DATA = [u'Сводная таблица результатов выборов по единому многомандатному округу', u'Сводная таблица итогов голосования по пропорциональной ', u'Сводная таблица результатов выборов по единому', u'Сводная таблица предварительных итогов голосования', u'Результаты референдума', u'Сводная таблица итогов голосования по федеральному округу', # u'Сводная таблица итогов голосования по мажоритарной системе выборов(Протокол №1)', u'Сводная таблица результатов выборов', u'Сводная таблица итогов голосования', u'Сводный отчет об итогах голосования', u'Сводная таблица о результатах выборов'] PATTERNS_TO_EXCLUDE = [u'одномандатн', u'мажоритарн'] nrec = 0 no_results_href = 0 class LoadRetryWithDifferentFormat(Exception): pass class LoadFailedDoNotRetry(Exception): def __init__(self, url): self.url = url def __str__(self): return repr(self.url) class LoadFailedDifferentCandidates(Exception): pass class LoadErrorNoDataMarker(Exception): def __init__(self, url): self.url = url def __str__(self): return repr(self.url) class LoadFailedEmptyCells(Exception): def __init__(self, url): self.url = url def __str__(self): return repr(self.url) def check_len(rr_cc, target_len, error_message): if len(rr_cc) != target_len: print error_message raise LoadRetryWithDifferentFormat def check_lens(rr_cc, target_lens, error_message): if len(rr_cc) not in target_lens: print error_message raise LoadRetryWithDifferentFormat def check_text (r_c, target_texts, error_message): if not any_text_is_there(target_texts, r_c.get_text()): print error_message print r_c.get_text().strip() print repr(target_texts).decode('unicode-escape') raise LoadRetryWithDifferentFormat def check_not_empty(s, error_message): if s == '': print error_message raise LoadRetryWithDifferentFormat def make_link (href, title): return "<a href=\'" + href + "\'>" + title + "</a>" n_pages_got = 0 n_pages_exceptions = 0 n_pages_retried = 0 n_pages_got_after_retries = 0 def print_parser_stats(): print 'n_pages_got', n_pages_got print 'n_pages_exceptions', n_pages_exceptions print 'n_pages_retried', n_pages_retried print 'n_pages_got_after_retries', n_pages_got_after_retries return def get_safe(link): global n_pages_got global n_pages_exceptions global n_pages_retried global n_pages_got_after_retries headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0' } n_attempts = 0 done = False max_attempts = 1000 while n_attempts < max_attempts and not done: try: bad_page = True n_retries = 0 max_retries = 7 while n_retries < max_retries and bad_page: bad_page = False sleep(1) response = requests.get(link, headers, timeout=5) page = BeautifulSoup(response.text,'html.parser') for table in page.find_all('table'): if not bad_page: for r in table.find_all('tr'): if not bad_page: for c in r.find_all('td'): if not bad_page and c.get_text().strip() == u'Нет данных для построения отчета.': bad_page = True if n_retries == 0: n_pages_retried += 1 if bad_page: n_retries += 1 if n_retries != 0 and not bad_page: n_pages_got_after_retries += 1 done = True n_pages_got += 1 except: print 'requests.get failed, attempt ' + str(n_attempts) sleep(3) n_pages_exceptions += 1 pass n_attempts += 1 if n_attempts == max_attempts: print 'ERRORERROR: did not 
manage to get the data from url ' + link[0] exit(1) return response def any_text_is_there(patterns, text): t = text.lower().replace(' ', '') for ptrn in patterns: p = ptrn.lower() p = p.replace(' ', '') if re.search(p, t) is not None: return True return False rs = 0 class HTMLListParser: def __init__(self): return def get_level(self, table): ss = table.find_all('select') level = None for s in ss: if s.attrs['name'] == 'urovproved': options = s.find_all('option') for opt in options: if 'selected' in opt.attrs: t = opt.get_text().lower() if t in levels: if level is None: level = levels[t] else: print 'ERRORERROR: Cannot work with several elections levels simultaneously, please select one' exit(1) return level def parse_elections_list_file(self, file): f = codecs.open(file, encoding='windows-1251') d = f.read() soup = BeautifulSoup(d, 'html.parser') f.close() take_next = False for table in soup.find_all('table'): if re.search(u'Уровень выборов', table.get_text()) is not None: if len(table.find_all('table')) == 0: level = self.
level is None or level == '': print('ERRORERROR: No level for elections list') exit(1) elections_list['level'] = level return elections_list # find the innermost table with this text if re.search(u'Всего найдено записей:', table.get_text()) is not None: if len(table.find_all('table')) == 0: take_next = True return None def get_results_href(self, href): response = get_safe(href) soup = BeautifulSoup(response.text, 'html.parser') global rs rs += 1 if rs == 24: pass results_hrefs = [] for table in soup.find_all('table'): if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, table.get_text()): # Look for innermost table with ^^ header if len(table.find_all('table')) == 0: rr = table.find_all('tr') for r in rr: cc = r.find_all('td') check_lens(cc, [1,2], 'Must be 1 or 2 columns here, exiting') for c in cc: if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, c.get_text()): links = c.find_all('a', href=True) check_len(links, 1, 'Must be 1 link here') for l in links: results_hrefs.append ({'href':l['href'], 'title':l.text}) # If empty - exception if len(results_hrefs) == 0: print 'Did not find results_href, ERRORERROR' raise LoadFailedDoNotRetry(href) # If one link - return it elif len(results_hrefs) == 1: return results_hrefs[0]['href'] # If there are several protocols (links), try to return one which does not contain patterns to exclude # If did not manage, return the last one (it usually contains links to results we need) else: for r in results_hrefs: if not any_text_is_there(PATTERNS_TO_EXCLUDE, r['title']): return r['href'] return results_hrefs[len(results_hrefs) - 1]['href'] return None def parse_elections_list_row(self, elections_list, rr, n_filtered_out, nr): global nrec global no_results_href # date cc = rr[nrec].find_all('td') check_len(cc, 1, 'Must be 1 column here') dt = cc[0].get_text().strip() nrec += 1 cc = rr[nrec].find_all('td') region = '' while (nrec < nr) and (len(cc) == 2): if cc[0].get_text().strip() != '': region = cc[0].get_text().strip() if region == '': print 'ERRORERROR: Empty region, exiting' exit(1) links = cc[1].find_all('a', href=True) href = links[0]['href'] title = links[0].text.strip() print 'Region: ' + region + ' title: ' + title + ' date: ' + dt + ', row ' + str(nrec) + ' out of ' + str(nr) try: results_href = self.get_results_href(href) rec = {'date': dt, 'generic_href': href, 'results_href': results_href, 'title': title, 'location': region} except LoadFailedDoNotRetry: print 'ERROR: Exception at row number ' + str(nrec) + ' did not get results href, writing NULL' print 'Generic URL: ' + href rec = {'date': dt, 'generic_href': href, 'results_href': 'NULL', 'title': title, 'location': region} no_results_href += 1 except LoadRetryWithDifferentFormat: print 'WARNING: Exception at row number ' + str(nrec) + ' did not get results href, writing NULL' print 'Generic URL: ' + href rec = {'date': dt, 'generic_href': href, 'results_href': 'NULL', 'title': title, 'location': region} no_results_href += 1 elections_list['elections'].append(rec) nrec += 1 if nrec < nr: cc = rr[nrec].find_all('td') return n_filtered_out def parse_elections_list_table(self, table): elections_list = {'elections': []} # First row - date, next row - region, title and link rr = table.find_all('tr') nr = len(rr) global nrec n_filtered_out = 0 nexceptions = 0 nrec = 0 while nrec < nr: try: n_filtered_out = self.parse_elections_list_row(elections_list, rr, n_filtered_out, nr) except LoadFailedDoNotRetry: print 'WARNING: Exception, skipped row number ' + str(nrec) nexceptions += 1 nrec += 1 except 
LoadRetryWithDifferentFormat: print 'WARNING: Exception, skipped row number ' + str(nrec) nexceptions += 1 nrec += 1 print 'Returning ' + str(len(elections_list['elections'])) + ', filtered out ' + \ str(n_filtered_out) + ', exceptions: ' + str(nexceptions) + ' no_results_href: ' + str(no_results_href) print ' Total (taken + exceptions + filtered out): ' + \ str(len(elections_list['elections']) + n_filtered_out + nexceptions) return elections_list
get_level(table) if take_next: elections_list = self.parse_elections_list_table(table) if
conditional_block
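parse_elections_list_file in this record searches for the innermost table that mentions a marker string, i.e. a table whose text matches and which contains no nested table elements. A small sketch of that lookup with BeautifulSoup; the HTML and the (translated) marker text are invented for illustration:

```python
from bs4 import BeautifulSoup

HTML = """
<table><tr><td>
  <table><tr><td>Level of elections: federal</td></tr></table>
</td></tr></table>
"""

def find_innermost_table(soup, marker):
    """Return the deepest table whose text contains marker, or None."""
    for table in soup.find_all("table"):
        # Outer wrappers also contain the marker text; skip any table that
        # still has a nested <table> inside it.
        if marker in table.get_text() and len(table.find_all("table")) == 0:
            return table
    return None

if __name__ == "__main__":
    soup = BeautifulSoup(HTML, "html.parser")
    table = find_innermost_table(soup, "Level of elections")
    print(table.get_text(strip=True))
```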
listparser.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import requests from bs4 import BeautifulSoup import codecs from time import sleep import re levels = {u'федеральный': 'federal', u'региональный': 'subject', u'административный центр': 'local', u'местное самоуправление': 'local'} PATTERNS_LINKS_TO_RESULT_DATA = [u'Сводная таблица результатов выборов по единому многомандатному округу', u'Сводная таблица итогов голосования по пропорциональной ', u'Сводная таблица результатов выборов по единому', u'Сводная таблица предварительных итогов голосования', u'Результаты референдума', u'Сводная таблица итогов голосования по федеральному округу', # u'Сводная таблица итогов голосования по мажоритарной системе выборов(Протокол №1)', u'Сводная таблица результатов выборов', u'Сводная таблица итогов голосования', u'Сводный отчет об итогах голосования', u'Сводная таблица о результатах выборов'] PATTERNS_TO_EXCLUDE = [u'одномандатн', u'мажоритарн'] nrec = 0 no_results_href = 0 class LoadRetryWithDifferentFormat(Exception): pass class LoadFailedDoNotRetry(Exception): def __init__(self, url): self.url = url def __str__(self): return repr(self.url) class LoadFailedDifferentCandidates(Exception): pass class LoadErrorNoDataMarker(Exception): def __init__(self, url): self.url = url def __str__(self): return repr(self.url) class LoadFailedEmptyCells(Exception): def __init__(self, url): self.url = url def __str__(self): return repr(self.url) def check_len(rr_cc, target_len, error_message): if len(rr_cc) != target_len: print error_message raise LoadRetryWithDifferentFormat def check_lens(rr_cc, target_lens, error_message): if len(rr_cc) not in target_lens: print error_message raise LoadRetryWithDifferentFormat def check_text (r_c, target_texts, error_message): if not any_text_is_there(target_texts, r_c.get_text()): print error_message print r_c.get_text().strip() print repr(target_texts).decode('unicode-escape') raise LoadRetryWithDifferentFormat def check_not_empty(s, error_message): if s == '': print error_message raise LoadRetryWithDifferentFormat def make_link (href, title): return "<a href=\'" + href + "\'>" + title + "</a>" n_pages_got = 0 n_pages_exceptions = 0 n_pages_retried = 0 n_pages_got_after_retries = 0 def print_parser_stats(): print 'n_pages_got', n_pages_got print 'n_pages_exceptions', n_pages_exceptions print 'n_pages_retried', n_pages_retried print 'n_pages_got_after_retries', n_pages_got_after_retries return def get_safe(link): global n_pages_got global n_pages_exceptions global n_pages_retried global n_pages_got_after_retries headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0' } n_attempts = 0 done = False max_attempts = 1000 while n_attempts < max_attempts and not done: try: bad_page = True n_retries = 0 max_retries = 7 while n_retries < max_retries and bad_page: bad_page = False sleep(1) response = requests.get(link, headers, timeout=5) page = BeautifulSoup(response.text,'html.parser') for table in page.find_all('table'): if not bad_page: for r in table.find_all('tr'): if not bad_page: for c in r.find_all('td'): if not bad_page and c.get_text().strip() == u'Нет данных для построения отчета.': bad_page = True if n_retries == 0: n_pages_retried += 1 if bad_page: n_retries += 1 if n_retries != 0 and not bad_page: n_pages_got_after_retries += 1 done = True n_pages_got += 1 except: print 'requests.get failed, attempt ' + str(n_attempts) sleep(3) n_pages_exceptions += 1 pass n_attempts += 1 if n_attempts == max_attempts: print 'ERRORERROR: did not 
manage to get the data from url ' + link[0] exit(1) return response def any_text_is_there(patterns, text): t = text.lower().replace(' ', '') for ptrn in patterns: p = ptrn.lower() p = p.replace(' ', '') if re.search(p, t) is not None: return True return False rs = 0 class HTMLListParser: def __init__(self): return def get_level(self, table): ss = table.find_all('select') level = None for s in ss: if s.attrs['name'] == 'urovproved': options = s.find_all('option') for opt in options: if 'selected' in opt.attrs: t = opt.get_text().lower() if t in levels: if level is None: level = levels[t] else: print 'ERRORERROR: Cannot work with several elections levels simultaneously, please select one' exit(1) return level def parse_elections_list_file(self, file): f = codecs.open(file, encoding='windows-1251') d = f.read() soup = BeautifulSoup(d, 'html.parser') f.close() take_next = False for table in soup.find_all('table'): if re.search(u'Уровень выборов', table.get_text()) is not None: if len(table.find_all('table')) == 0: level = self.get_level(table) if take_next: elections_list = self.parse_elections_list_table(table) if level is None or level == '': print('ERRORERROR: No level for elections list') exit(1) elections_list['level'] = level return elections_list # find the innermost table with this text if re.search(u'Всего найдено записей:', table.get_text()) is not None: if len(table.find_all('table')) == 0: take_next = True return None def get_results_href(self, href): response = get_safe(href) soup = BeautifulSoup(response.text, 'html.parser') global rs rs += 1 if rs == 24: pass results_hrefs = [] for table in soup.find_all('table'): if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, table.get_text()): # Look for innermost table with ^^ header if len(table.find_all('table')) == 0: rr = table.find_all('tr') for r in rr: cc = r.find_all('td') check_lens(cc, [1,2], 'Must be 1 or 2 columns here, exiting') for c in cc: if any_text_is_there(PATTERNS_LINKS_TO_RESULT_DATA, c.get_text()): links = c.find_all('a', href=True) check_len(links, 1, 'Must be 1 link here') for l in links:
results_hrefs.append ({'href':l['href'], 'title':l.text}) # If empty - exception
if len(results_hrefs) == 0: print 'Did not find results_href, ERRORERROR' raise LoadFailedDoNotRetry(href) # If one link - return it elif len(results_hrefs) == 1: return results_hrefs[0]['href'] # If there are several protocols (links), try to return one which does not contain patterns to exclude # If did not manage, return the last one (it usually contains links to results we need) else: for r in results_hrefs: if not any_text_is_there(PATTERNS_TO_EXCLUDE, r['title']): return r['href'] return results_hrefs[len(results_hrefs) - 1]['href'] return None def parse_elections_list_row(self, elections_list, rr, n_filtered_out, nr): global nrec global no_results_href # date cc = rr[nrec].find_all('td') check_len(cc, 1, 'Must be 1 column here') dt = cc[0].get_text().strip() nrec += 1 cc = rr[nrec].find_all('td') region = '' while (nrec < nr) and (len(cc) == 2): if cc[0].get_text().strip() != '': region = cc[0].get_text().strip() if region == '': print 'ERRORERROR: Empty region, exiting' exit(1) links = cc[1].find_all('a', href=True) href = links[0]['href'] title = links[0].text.strip() print 'Region: ' + region + ' title: ' + title + ' date: ' + dt + ', row ' + str(nrec) + ' out of ' + str(nr) try: results_href = self.get_results_href(href) rec = {'date': dt, 'generic_href': href, 'results_href': results_href, 'title': title, 'location': region} except LoadFailedDoNotRetry: print 'ERROR: Exception at row number ' + str(nrec) + ' did not get results href, writing NULL' print 'Generic URL: ' + href rec = {'date': dt, 'generic_href': href, 'results_href': 'NULL', 'title': title, 'location': region} no_results_href += 1 except LoadRetryWithDifferentFormat: print 'WARNING: Exception at row number ' + str(nrec) + ' did not get results href, writing NULL' print 'Generic URL: ' + href rec = {'date': dt, 'generic_href': href, 'results_href': 'NULL', 'title': title, 'location': region} no_results_href += 1 elections_list['elections'].append(rec) nrec += 1 if nrec < nr: cc = rr[nrec].find_all('td') return n_filtered_out def parse_elections_list_table(self, table): elections_list = {'elections': []} # First row - date, next row - region, title and link rr = table.find_all('tr') nr = len(rr) global nrec n_filtered_out = 0 nexceptions = 0 nrec = 0 while nrec < nr: try: n_filtered_out = self.parse_elections_list_row(elections_list, rr, n_filtered_out, nr) except LoadFailedDoNotRetry: print 'WARNING: Exception, skipped row number ' + str(nrec) nexceptions += 1 nrec += 1 except LoadRetryWithDifferentFormat: print 'WARNING: Exception, skipped row number ' + str(nrec) nexceptions += 1 nrec += 1 print 'Returning ' + str(len(elections_list['elections'])) + ', filtered out ' + \ str(n_filtered_out) + ', exceptions: ' + str(nexceptions) + ' no_results_href: ' + str(no_results_href) print ' Total (taken + exceptions + filtered out): ' + \ str(len(elections_list['elections']) + n_filtered_out + nexceptions) return elections_list
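The link matching above hinges on any_text_is_there: both the pattern and the cell text are lowercased and have their spaces removed before re.search runs, so differences in case and spacing in the scraped headers do not break the match. A minimal Rust sketch of that normalization idea (plain substring search stands in for re.search here, and the sample strings are invented for illustration):

```rust
/// Normalize the way the Python helper does: lowercase and drop spaces.
fn normalize(s: &str) -> String {
    s.to_lowercase().chars().filter(|c| *c != ' ').collect()
}

/// True if any normalized pattern occurs in the normalized text.
/// The real helper uses a regex search, but the patterns are literal phrases,
/// so substring search illustrates the same behaviour.
fn any_text_is_there(patterns: &[&str], text: &str) -> bool {
    let t = normalize(text);
    patterns.iter().copied().any(|p| t.contains(normalize(p).as_str()))
}

fn main() {
    // Invented cell text; the real patterns are the Russian headers listed earlier in the file.
    let patterns = ["Сводная таблица результатов выборов"];
    let cell = "  СВОДНАЯ  ТАБЛИЦА  РЕЗУЛЬТАТОВ  ВЫБОРОВ по единому округу ";
    println!("matched: {}", any_text_is_there(&patterns, cell)); // prints "matched: true"
}
```

Because the comparison runs on normalized strings, the PATTERNS_LINKS_TO_RESULT_DATA and PATTERNS_TO_EXCLUDE lists can be written with natural spacing even though the scraped table cells often pad or collapse whitespace.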
wasitests.rs
//! This file will run at build time to autogenerate the WASI regression tests //! It will compile the files indicated in TESTS, to executable and .wasm //! - Compile with the native rust target to get the expected output //! - Compile with the latest WASI target to get the wasm //! - Generate the test that will compare the output of running the .wasm file //! with wasmer with the expected output use glob::glob; use std::fs; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::io; use std::io::prelude::*; use super::util; use super::wasi_version::*;
#[derive(Debug, Clone, PartialEq, Eq)] pub struct NativeOutput { stdout: String, stderr: String, result: i64, } /// Compile and execute the test file as native code, saving the results to be /// compared against later. /// /// This function attempts to clean up its output after it executes it. fn generate_native_output( temp_dir: &Path, file: &str, normalized_name: &str, args: &[String], options: &WasiOptions, ) -> io::Result<NativeOutput> { let executable_path = temp_dir.join(normalized_name); println!( "Compiling program {} to native at {}", file, executable_path.to_string_lossy() ); let native_out = Command::new("rustc") .arg(file) .arg("-o") .args(args) .arg(&executable_path) .output() .expect("Failed to compile program to native code"); util::print_info_on_error(&native_out, "COMPILATION FAILED"); #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mut perm = executable_path .metadata() .expect("native executable") .permissions(); perm.set_mode(0o766); println!( "Setting execute permissions on {}", executable_path.to_string_lossy() ); fs::set_permissions(&executable_path, perm)?; } println!( "Executing native program at {}", executable_path.to_string_lossy() ); // workspace root const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi"); let mut native_command = Command::new(&executable_path) .current_dir(EXECUTE_DIR) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .unwrap(); if let Some(stdin_str) = &options.stdin { write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap(); } let result = native_command .wait() .expect("Failed to execute native program"); let stdout_str = { let mut stdout = native_command.stdout.unwrap(); let mut s = String::new(); stdout.read_to_string(&mut s).unwrap(); s }; let stderr_str = { let mut stderr = native_command.stderr.unwrap(); let mut s = String::new(); stderr.read_to_string(&mut s).unwrap(); s }; if !result.success() { println!("NATIVE PROGRAM FAILED"); println!("stdout:\n{}", stdout_str); eprintln!("stderr:\n{}", stderr_str); } let result = result.code().unwrap() as i64; Ok(NativeOutput { stdout: stdout_str, stderr: stderr_str, result, }) } /// compile the Wasm file for the given version of WASI /// /// returns the path of where the wasm file is fn compile_wasm_for_version( temp_dir: &Path, file: &str, out_dir: &Path, rs_mod_name: &str, version: WasiVersion, ) -> io::Result<PathBuf> { //let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(out_dir)?; } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name); wasm_out_name.set_extension("wasm"); wasm_out_name }; println!("Reading contents from file `{}`", file); let file_contents: String = { let mut fc = String::new(); let mut f = fs::OpenOptions::new().read(true).open(file)?; f.read_to_string(&mut fc)?; fc }; let temp_wasi_rs_file_name = temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name)); { let mut actual_file = fs::OpenOptions::new() .write(true) .truncate(true) .create(true) .open(&temp_wasi_rs_file_name) .unwrap(); actual_file.write_all(file_contents.as_bytes()).unwrap(); } println!( "Compiling wasm module `{}` with toolchain `{}`", &wasm_out_name.to_string_lossy(), version.get_compiler_toolchain() ); let mut command = Command::new("rustc"); command .arg(format!("+{}", version.get_compiler_toolchain())) .arg("--target=wasm32-wasi") .arg("-C") .arg("opt-level=z") .arg(&temp_wasi_rs_file_name) .arg("-o") .arg(&wasm_out_name); 
println!("Command {:?}", command); let wasm_compilation_out = command.output().expect("Failed to compile program to wasm"); util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION"); println!( "Removing file `{}`", &temp_wasi_rs_file_name.to_string_lossy() ); // to prevent commiting huge binary blobs forever let wasm_strip_out = Command::new("wasm-strip") .arg(&wasm_out_name) .output() .expect("Failed to strip compiled wasm module"); util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM"); let wasm_opt_out = Command::new("wasm-opt") .arg("-Oz") .arg(&wasm_out_name) .arg("-o") .arg(&wasm_out_name) .output() .expect("Failed to optimize compiled wasm module with wasm-opt!"); util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM"); Ok(wasm_out_name) } /// Returns the a Vec of the test modules created fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) { let src_code: String = fs::read_to_string(file).unwrap(); let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default(); assert!(file.ends_with(".rs")); let rs_mod_name = { Path::new(&file.to_lowercase()) .file_stem() .unwrap() .to_string_lossy() .to_string() }; let base_dir = Path::new(file).parent().unwrap(); let NativeOutput { stdout, stderr, result, } = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options) .expect("Generate native output"); let test = WasiTest { wasm_prog_name: format!("{}.wasm", rs_mod_name), stdout, stderr, result, options, }; let test_serialized = test.into_wasi_wast(); println!("Generated test output: {}", &test_serialized); wasi_versions .iter() .map(|&version| { let out_dir = base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(&out_dir).unwrap(); } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name.clone()); wasm_out_name.set_extension("wast"); wasm_out_name }; println!("Writing test output to {}", wasm_out_name.to_string_lossy()); fs::write(&wasm_out_name, test_serialized.clone()).unwrap(); println!("Compiling wasm version {:?}", version); compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version) .unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain())); }).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated. 
} const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs"); pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) { let temp_dir = tempfile::TempDir::new().unwrap(); for entry in glob(WASI_TEST_SRC_DIR).unwrap() { match entry { Ok(path) => { let test = path.to_str().unwrap(); if !specific_tests.is_empty() { if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) { if specific_tests.contains(&filename) { compile(temp_dir.path(), test, wasi_versions); } } } else { compile(temp_dir.path(), test, wasi_versions); } } Err(e) => println!("{:?}", e), } } println!("All modules generated."); } /// This is the structure of the `.wast` file #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiTest { /// The name of the wasm module to run pub wasm_prog_name: String, /// The program expected output on stdout pub stdout: String, /// The program expected output on stderr pub stderr: String, /// The program expected result pub result: i64, /// The program options pub options: WasiOptions, } impl WasiTest { fn into_wasi_wast(self) -> String { use std::fmt::Write; let mut out = format!( ";; This file was generated by https://github.com/wasmerio/wasi-tests\n (wasi_test \"{}\"", self.wasm_prog_name ); if !self.options.env.is_empty() { let envs = self .options .env .iter() .map(|(name, value)| format!("\"{}={}\"", name, value)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (envs {})", envs); } if !self.options.args.is_empty() { let args = self .options .args .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (args {})", args); } if !self.options.dir.is_empty() { let preopens = self .options .dir .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (preopens {})", preopens); } if !self.options.mapdir.is_empty() { let map_dirs = self .options .mapdir .iter() .map(|(a, b)| format!("\"{}:{}\"", a, b)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (map_dirs {})", map_dirs); } if !self.options.tempdir.is_empty() { let temp_dirs = self .options .tempdir .iter() .map(|td| format!("\"{}\"", td)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (temp_dirs {})", temp_dirs); } let _ = write!(out, "\n (assert_return (i64.const {}))", self.result); if let Some(stdin) = &self.options.stdin { let _ = write!(out, "\n (stdin {:?})", stdin); } if !self.stdout.is_empty() { let _ = write!(out, "\n (assert_stdout {:?})", self.stdout); } if !self.stderr.is_empty() { let _ = write!(out, "\n (assert_stderr {:?})", self.stderr); } let _ = write!(out, "\n)\n"); out } } /// The options provied when executed a WASI Wasm program #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiOptions { /// Mapped pre-opened dirs pub mapdir: Vec<(String, String)>, /// Environment vars pub env: Vec<(String, String)>, /// Program arguments pub args: Vec<String>, /// Pre-opened directories pub dir: Vec<String>, /// The alias of the temporary directory to use pub tempdir: Vec<String>, /// Stdin to give to the native program and WASI program. 
pub stdin: Option<String>, } /// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:" fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> { if source_code.starts_with("// WASI:") { let mut args = WasiOptions::default(); for arg_line in source_code .lines() .skip(1) .take_while(|line| line.starts_with("// ")) { let arg_line = arg_line.strip_prefix("// ").unwrap(); let arg_line = arg_line.trim(); let colon_idx = arg_line .find(':') .expect("directives provided at the top must be separated by a `:`"); let (command_name, value) = arg_line.split_at(colon_idx); let value = value.strip_prefix(':').unwrap(); let value = value.trim(); match command_name { "mapdir" => // We try first splitting by `::` { if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] { args.mapdir.push((alias.to_string(), real_dir.to_string())); } else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] { // And then we try splitting by `:` (for compatibility with previous API) args.mapdir.push((alias.to_string(), real_dir.to_string())); } else { eprintln!("Parse error in mapdir {} not parsed correctly", value); } } "env" => { if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] { args.env.push((name.to_string(), val.to_string())); } else { eprintln!("Parse error in env {} not parsed correctly", value); } } "dir" => { args.dir.push(value.to_string()); } "arg" => { args.args.push(value.to_string()); } "tempdir" => { args.tempdir.push(value.to_string()); } "stdin" => { assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code"); let s = value; let s = s.strip_prefix('"').expect("expected leading '\"' in stdin"); let s = s .trim_end() .strip_suffix('\"') .expect("expected trailing '\"' in stdin"); args.stdin = Some(s.to_string()); } e => { eprintln!("WARN: comment arg: `{}` is not supported", e); } } } return Some(args); } None }
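The directive parser above only treats a source file as a WASI test when the file begins with "// WASI:"; it then consumes each following "// name: value" line until the first line that is not a "// " comment, filling a WasiOptions from the mapdir, env, arg, dir, tempdir and stdin directives. A minimal sketch of what such a test source could look like (the paths and values are invented for illustration, not taken from the real wasi/tests suite):

```rust
// WASI:
// mapdir: data::wasi/test_data
// env: DOG=2
// arg: --quiet
// tempdir: temp
// stdin: "hello"

// The blank line above ends the directive block: the parser only consumes
// leading lines that start with "// ". Below is an ordinary, made-up test body.
use std::io::Read;

fn main() {
    let mut input = String::new();
    std::io::stdin().read_to_string(&mut input).unwrap();
    println!("stdin was: {}", input);
}
```

For a header like this, into_wasi_wast would serialize the captured native run into an s-expression roughly of the form (wasi_test "example.wasm" (envs "DOG=2") (args "--quiet") (map_dirs "data:wasi/test_data") (temp_dirs "temp") (assert_return (i64.const 0)) (stdin "hello") (assert_stdout ...)), where the assert_* clauses and the return code are filled in from whatever the native build of the test actually printed and returned.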
wasitests.rs
//! This file will run at build time to autogenerate the WASI regression tests //! It will compile the files indicated in TESTS, to:executable and .wasm //! - Compile with the native rust target to get the expected output //! - Compile with the latest WASI target to get the wasm //! - Generate the test that will compare the output of running the .wasm file //! with wasmer with the expected output use glob::glob; use std::fs; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::io; use std::io::prelude::*; use super::util; use super::wasi_version::*; #[derive(Debug, Clone, PartialEq, Eq)] pub struct NativeOutput { stdout: String, stderr: String, result: i64, } /// Compile and execute the test file as native code, saving the results to be /// compared against later. /// /// This function attempts to clean up its output after it executes it. fn generate_native_output( temp_dir: &Path, file: &str, normalized_name: &str, args: &[String], options: &WasiOptions, ) -> io::Result<NativeOutput> { let executable_path = temp_dir.join(normalized_name); println!( "Compiling program {} to native at {}", file, executable_path.to_string_lossy() ); let native_out = Command::new("rustc") .arg(file) .arg("-o") .args(args) .arg(&executable_path) .output() .expect("Failed to compile program to native code"); util::print_info_on_error(&native_out, "COMPILATION FAILED"); #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mut perm = executable_path .metadata() .expect("native executable") .permissions(); perm.set_mode(0o766); println!( "Setting execute permissions on {}", executable_path.to_string_lossy() ); fs::set_permissions(&executable_path, perm)?; } println!( "Executing native program at {}", executable_path.to_string_lossy() ); // workspace root const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi"); let mut native_command = Command::new(&executable_path) .current_dir(EXECUTE_DIR) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .unwrap(); if let Some(stdin_str) = &options.stdin { write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap(); } let result = native_command .wait() .expect("Failed to execute native program"); let stdout_str = { let mut stdout = native_command.stdout.unwrap(); let mut s = String::new(); stdout.read_to_string(&mut s).unwrap(); s }; let stderr_str = { let mut stderr = native_command.stderr.unwrap(); let mut s = String::new(); stderr.read_to_string(&mut s).unwrap(); s }; if !result.success() { println!("NATIVE PROGRAM FAILED"); println!("stdout:\n{}", stdout_str); eprintln!("stderr:\n{}", stderr_str); } let result = result.code().unwrap() as i64; Ok(NativeOutput { stdout: stdout_str, stderr: stderr_str, result, }) } /// compile the Wasm file for the given version of WASI /// /// returns the path of where the wasm file is fn compile_wasm_for_version( temp_dir: &Path, file: &str, out_dir: &Path, rs_mod_name: &str, version: WasiVersion, ) -> io::Result<PathBuf> { //let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(out_dir)?; } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name); wasm_out_name.set_extension("wasm"); wasm_out_name }; println!("Reading contents from file `{}`", file); let file_contents: String = { let mut fc = String::new(); let mut f = fs::OpenOptions::new().read(true).open(file)?; f.read_to_string(&mut fc)?; fc }; let temp_wasi_rs_file_name = 
temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name)); { let mut actual_file = fs::OpenOptions::new() .write(true) .truncate(true) .create(true) .open(&temp_wasi_rs_file_name) .unwrap(); actual_file.write_all(file_contents.as_bytes()).unwrap(); } println!( "Compiling wasm module `{}` with toolchain `{}`", &wasm_out_name.to_string_lossy(), version.get_compiler_toolchain() ); let mut command = Command::new("rustc"); command .arg(format!("+{}", version.get_compiler_toolchain())) .arg("--target=wasm32-wasi") .arg("-C") .arg("opt-level=z") .arg(&temp_wasi_rs_file_name) .arg("-o") .arg(&wasm_out_name); println!("Command {:?}", command); let wasm_compilation_out = command.output().expect("Failed to compile program to wasm"); util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION"); println!( "Removing file `{}`", &temp_wasi_rs_file_name.to_string_lossy() ); // to prevent commiting huge binary blobs forever let wasm_strip_out = Command::new("wasm-strip") .arg(&wasm_out_name) .output() .expect("Failed to strip compiled wasm module"); util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM"); let wasm_opt_out = Command::new("wasm-opt") .arg("-Oz") .arg(&wasm_out_name) .arg("-o") .arg(&wasm_out_name) .output() .expect("Failed to optimize compiled wasm module with wasm-opt!"); util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM"); Ok(wasm_out_name) } /// Returns the a Vec of the test modules created fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) { let src_code: String = fs::read_to_string(file).unwrap(); let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default(); assert!(file.ends_with(".rs")); let rs_mod_name = { Path::new(&file.to_lowercase()) .file_stem() .unwrap() .to_string_lossy() .to_string() }; let base_dir = Path::new(file).parent().unwrap(); let NativeOutput { stdout, stderr, result, } = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options) .expect("Generate native output"); let test = WasiTest { wasm_prog_name: format!("{}.wasm", rs_mod_name), stdout, stderr, result, options, }; let test_serialized = test.into_wasi_wast(); println!("Generated test output: {}", &test_serialized); wasi_versions .iter() .map(|&version| { let out_dir = base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(&out_dir).unwrap(); } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name.clone()); wasm_out_name.set_extension("wast"); wasm_out_name }; println!("Writing test output to {}", wasm_out_name.to_string_lossy()); fs::write(&wasm_out_name, test_serialized.clone()).unwrap(); println!("Compiling wasm version {:?}", version); compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version) .unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain())); }).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated. 
} const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs"); pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) { let temp_dir = tempfile::TempDir::new().unwrap(); for entry in glob(WASI_TEST_SRC_DIR).unwrap() { match entry { Ok(path) => { let test = path.to_str().unwrap(); if !specific_tests.is_empty() { if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) { if specific_tests.contains(&filename) { compile(temp_dir.path(), test, wasi_versions); } } } else
} Err(e) => println!("{:?}", e), } } println!("All modules generated."); } /// This is the structure of the `.wast` file #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiTest { /// The name of the wasm module to run pub wasm_prog_name: String, /// The program expected output on stdout pub stdout: String, /// The program expected output on stderr pub stderr: String, /// The program expected result pub result: i64, /// The program options pub options: WasiOptions, } impl WasiTest { fn into_wasi_wast(self) -> String { use std::fmt::Write; let mut out = format!( ";; This file was generated by https://github.com/wasmerio/wasi-tests\n (wasi_test \"{}\"", self.wasm_prog_name ); if !self.options.env.is_empty() { let envs = self .options .env .iter() .map(|(name, value)| format!("\"{}={}\"", name, value)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (envs {})", envs); } if !self.options.args.is_empty() { let args = self .options .args .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (args {})", args); } if !self.options.dir.is_empty() { let preopens = self .options .dir .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (preopens {})", preopens); } if !self.options.mapdir.is_empty() { let map_dirs = self .options .mapdir .iter() .map(|(a, b)| format!("\"{}:{}\"", a, b)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (map_dirs {})", map_dirs); } if !self.options.tempdir.is_empty() { let temp_dirs = self .options .tempdir .iter() .map(|td| format!("\"{}\"", td)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (temp_dirs {})", temp_dirs); } let _ = write!(out, "\n (assert_return (i64.const {}))", self.result); if let Some(stdin) = &self.options.stdin { let _ = write!(out, "\n (stdin {:?})", stdin); } if !self.stdout.is_empty() { let _ = write!(out, "\n (assert_stdout {:?})", self.stdout); } if !self.stderr.is_empty() { let _ = write!(out, "\n (assert_stderr {:?})", self.stderr); } let _ = write!(out, "\n)\n"); out } } /// The options provied when executed a WASI Wasm program #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiOptions { /// Mapped pre-opened dirs pub mapdir: Vec<(String, String)>, /// Environment vars pub env: Vec<(String, String)>, /// Program arguments pub args: Vec<String>, /// Pre-opened directories pub dir: Vec<String>, /// The alias of the temporary directory to use pub tempdir: Vec<String>, /// Stdin to give to the native program and WASI program. pub stdin: Option<String>, } /// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:" fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> { if source_code.starts_with("// WASI:") { let mut args = WasiOptions::default(); for arg_line in source_code .lines() .skip(1) .take_while(|line| line.starts_with("// ")) { let arg_line = arg_line.strip_prefix("// ").unwrap(); let arg_line = arg_line.trim(); let colon_idx = arg_line .find(':') .expect("directives provided at the top must be separated by a `:`"); let (command_name, value) = arg_line.split_at(colon_idx); let value = value.strip_prefix(':').unwrap(); let value = value.trim(); match command_name { "mapdir" => // We try first splitting by `::` { if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] 
{ args.mapdir.push((alias.to_string(), real_dir.to_string())); } else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] { // And then we try splitting by `:` (for compatibility with previous API) args.mapdir.push((alias.to_string(), real_dir.to_string())); } else { eprintln!("Parse error in mapdir {} not parsed correctly", value); } } "env" => { if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] { args.env.push((name.to_string(), val.to_string())); } else { eprintln!("Parse error in env {} not parsed correctly", value); } } "dir" => { args.dir.push(value.to_string()); } "arg" => { args.args.push(value.to_string()); } "tempdir" => { args.tempdir.push(value.to_string()); } "stdin" => { assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code"); let s = value; let s = s.strip_prefix('"').expect("expected leading '\"' in stdin"); let s = s .trim_end() .strip_suffix('\"') .expect("expected trailing '\"' in stdin"); args.stdin = Some(s.to_string()); } e => { eprintln!("WARN: comment arg: `{}` is not supported", e); } } } return Some(args); } None }
{ compile(temp_dir.path(), test, wasi_versions); }
conditional_block
wasitests.rs
//! This file will run at build time to autogenerate the WASI regression tests //! It will compile the files indicated in TESTS, to:executable and .wasm //! - Compile with the native rust target to get the expected output //! - Compile with the latest WASI target to get the wasm //! - Generate the test that will compare the output of running the .wasm file //! with wasmer with the expected output use glob::glob; use std::fs; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::io; use std::io::prelude::*; use super::util; use super::wasi_version::*; #[derive(Debug, Clone, PartialEq, Eq)] pub struct NativeOutput { stdout: String, stderr: String, result: i64, } /// Compile and execute the test file as native code, saving the results to be /// compared against later. /// /// This function attempts to clean up its output after it executes it. fn generate_native_output( temp_dir: &Path, file: &str, normalized_name: &str, args: &[String], options: &WasiOptions, ) -> io::Result<NativeOutput> { let executable_path = temp_dir.join(normalized_name); println!( "Compiling program {} to native at {}", file, executable_path.to_string_lossy() ); let native_out = Command::new("rustc") .arg(file) .arg("-o") .args(args) .arg(&executable_path) .output() .expect("Failed to compile program to native code"); util::print_info_on_error(&native_out, "COMPILATION FAILED"); #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mut perm = executable_path .metadata() .expect("native executable") .permissions(); perm.set_mode(0o766); println!( "Setting execute permissions on {}", executable_path.to_string_lossy() ); fs::set_permissions(&executable_path, perm)?; } println!( "Executing native program at {}", executable_path.to_string_lossy() ); // workspace root const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi"); let mut native_command = Command::new(&executable_path) .current_dir(EXECUTE_DIR) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .unwrap(); if let Some(stdin_str) = &options.stdin { write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap(); } let result = native_command .wait() .expect("Failed to execute native program"); let stdout_str = { let mut stdout = native_command.stdout.unwrap(); let mut s = String::new(); stdout.read_to_string(&mut s).unwrap(); s }; let stderr_str = { let mut stderr = native_command.stderr.unwrap(); let mut s = String::new(); stderr.read_to_string(&mut s).unwrap(); s }; if !result.success() { println!("NATIVE PROGRAM FAILED"); println!("stdout:\n{}", stdout_str); eprintln!("stderr:\n{}", stderr_str); } let result = result.code().unwrap() as i64; Ok(NativeOutput { stdout: stdout_str, stderr: stderr_str, result, }) } /// compile the Wasm file for the given version of WASI /// /// returns the path of where the wasm file is fn compile_wasm_for_version( temp_dir: &Path, file: &str, out_dir: &Path, rs_mod_name: &str, version: WasiVersion, ) -> io::Result<PathBuf> { //let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(out_dir)?; } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name); wasm_out_name.set_extension("wasm"); wasm_out_name }; println!("Reading contents from file `{}`", file); let file_contents: String = { let mut fc = String::new(); let mut f = fs::OpenOptions::new().read(true).open(file)?; f.read_to_string(&mut fc)?; fc }; let temp_wasi_rs_file_name = 
temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name)); { let mut actual_file = fs::OpenOptions::new() .write(true) .truncate(true) .create(true) .open(&temp_wasi_rs_file_name) .unwrap(); actual_file.write_all(file_contents.as_bytes()).unwrap(); } println!( "Compiling wasm module `{}` with toolchain `{}`", &wasm_out_name.to_string_lossy(), version.get_compiler_toolchain() ); let mut command = Command::new("rustc"); command .arg(format!("+{}", version.get_compiler_toolchain())) .arg("--target=wasm32-wasi") .arg("-C") .arg("opt-level=z") .arg(&temp_wasi_rs_file_name) .arg("-o") .arg(&wasm_out_name); println!("Command {:?}", command); let wasm_compilation_out = command.output().expect("Failed to compile program to wasm"); util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION"); println!( "Removing file `{}`", &temp_wasi_rs_file_name.to_string_lossy() ); // to prevent commiting huge binary blobs forever let wasm_strip_out = Command::new("wasm-strip") .arg(&wasm_out_name) .output() .expect("Failed to strip compiled wasm module"); util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM"); let wasm_opt_out = Command::new("wasm-opt") .arg("-Oz") .arg(&wasm_out_name) .arg("-o") .arg(&wasm_out_name) .output() .expect("Failed to optimize compiled wasm module with wasm-opt!"); util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM"); Ok(wasm_out_name) } /// Returns the a Vec of the test modules created fn
(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) { let src_code: String = fs::read_to_string(file).unwrap(); let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default(); assert!(file.ends_with(".rs")); let rs_mod_name = { Path::new(&file.to_lowercase()) .file_stem() .unwrap() .to_string_lossy() .to_string() }; let base_dir = Path::new(file).parent().unwrap(); let NativeOutput { stdout, stderr, result, } = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options) .expect("Generate native output"); let test = WasiTest { wasm_prog_name: format!("{}.wasm", rs_mod_name), stdout, stderr, result, options, }; let test_serialized = test.into_wasi_wast(); println!("Generated test output: {}", &test_serialized); wasi_versions .iter() .map(|&version| { let out_dir = base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(&out_dir).unwrap(); } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name.clone()); wasm_out_name.set_extension("wast"); wasm_out_name }; println!("Writing test output to {}", wasm_out_name.to_string_lossy()); fs::write(&wasm_out_name, test_serialized.clone()).unwrap(); println!("Compiling wasm version {:?}", version); compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version) .unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain())); }).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated. } const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs"); pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) { let temp_dir = tempfile::TempDir::new().unwrap(); for entry in glob(WASI_TEST_SRC_DIR).unwrap() { match entry { Ok(path) => { let test = path.to_str().unwrap(); if !specific_tests.is_empty() { if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) { if specific_tests.contains(&filename) { compile(temp_dir.path(), test, wasi_versions); } } } else { compile(temp_dir.path(), test, wasi_versions); } } Err(e) => println!("{:?}", e), } } println!("All modules generated."); } /// This is the structure of the `.wast` file #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiTest { /// The name of the wasm module to run pub wasm_prog_name: String, /// The program expected output on stdout pub stdout: String, /// The program expected output on stderr pub stderr: String, /// The program expected result pub result: i64, /// The program options pub options: WasiOptions, } impl WasiTest { fn into_wasi_wast(self) -> String { use std::fmt::Write; let mut out = format!( ";; This file was generated by https://github.com/wasmerio/wasi-tests\n (wasi_test \"{}\"", self.wasm_prog_name ); if !self.options.env.is_empty() { let envs = self .options .env .iter() .map(|(name, value)| format!("\"{}={}\"", name, value)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (envs {})", envs); } if !self.options.args.is_empty() { let args = self .options .args .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (args {})", args); } if !self.options.dir.is_empty() { let preopens = self .options .dir .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (preopens {})", preopens); } if !self.options.mapdir.is_empty() { let map_dirs = self .options .mapdir .iter() .map(|(a, b)| 
format!("\"{}:{}\"", a, b)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (map_dirs {})", map_dirs); } if !self.options.tempdir.is_empty() { let temp_dirs = self .options .tempdir .iter() .map(|td| format!("\"{}\"", td)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (temp_dirs {})", temp_dirs); } let _ = write!(out, "\n (assert_return (i64.const {}))", self.result); if let Some(stdin) = &self.options.stdin { let _ = write!(out, "\n (stdin {:?})", stdin); } if !self.stdout.is_empty() { let _ = write!(out, "\n (assert_stdout {:?})", self.stdout); } if !self.stderr.is_empty() { let _ = write!(out, "\n (assert_stderr {:?})", self.stderr); } let _ = write!(out, "\n)\n"); out } } /// The options provied when executed a WASI Wasm program #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiOptions { /// Mapped pre-opened dirs pub mapdir: Vec<(String, String)>, /// Environment vars pub env: Vec<(String, String)>, /// Program arguments pub args: Vec<String>, /// Pre-opened directories pub dir: Vec<String>, /// The alias of the temporary directory to use pub tempdir: Vec<String>, /// Stdin to give to the native program and WASI program. pub stdin: Option<String>, } /// Pulls args to the program out of a comment at the top of the file starting with "// WasiOptions:" fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> { if source_code.starts_with("// WASI:") { let mut args = WasiOptions::default(); for arg_line in source_code .lines() .skip(1) .take_while(|line| line.starts_with("// ")) { let arg_line = arg_line.strip_prefix("// ").unwrap(); let arg_line = arg_line.trim(); let colon_idx = arg_line .find(':') .expect("directives provided at the top must be separated by a `:`"); let (command_name, value) = arg_line.split_at(colon_idx); let value = value.strip_prefix(':').unwrap(); let value = value.trim(); match command_name { "mapdir" => // We try first splitting by `::` { if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] { args.mapdir.push((alias.to_string(), real_dir.to_string())); } else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] { // And then we try splitting by `:` (for compatibility with previous API) args.mapdir.push((alias.to_string(), real_dir.to_string())); } else { eprintln!("Parse error in mapdir {} not parsed correctly", value); } } "env" => { if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] { args.env.push((name.to_string(), val.to_string())); } else { eprintln!("Parse error in env {} not parsed correctly", value); } } "dir" => { args.dir.push(value.to_string()); } "arg" => { args.args.push(value.to_string()); } "tempdir" => { args.tempdir.push(value.to_string()); } "stdin" => { assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code"); let s = value; let s = s.strip_prefix('"').expect("expected leading '\"' in stdin"); let s = s .trim_end() .strip_suffix('\"') .expect("expected trailing '\"' in stdin"); args.stdin = Some(s.to_string()); } e => { eprintln!("WARN: comment arg: `{}` is not supported", e); } } } return Some(args); } None }
compile
identifier_name
wasitests.rs
//! This file will run at build time to autogenerate the WASI regression tests //! It will compile the files indicated in TESTS, to:executable and .wasm //! - Compile with the native rust target to get the expected output //! - Compile with the latest WASI target to get the wasm //! - Generate the test that will compare the output of running the .wasm file //! with wasmer with the expected output use glob::glob; use std::fs; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::io; use std::io::prelude::*; use super::util; use super::wasi_version::*; #[derive(Debug, Clone, PartialEq, Eq)] pub struct NativeOutput { stdout: String, stderr: String, result: i64, } /// Compile and execute the test file as native code, saving the results to be /// compared against later. /// /// This function attempts to clean up its output after it executes it. fn generate_native_output( temp_dir: &Path, file: &str, normalized_name: &str, args: &[String], options: &WasiOptions, ) -> io::Result<NativeOutput> { let executable_path = temp_dir.join(normalized_name); println!( "Compiling program {} to native at {}", file, executable_path.to_string_lossy() ); let native_out = Command::new("rustc") .arg(file) .arg("-o") .args(args) .arg(&executable_path) .output() .expect("Failed to compile program to native code"); util::print_info_on_error(&native_out, "COMPILATION FAILED"); #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mut perm = executable_path .metadata() .expect("native executable") .permissions(); perm.set_mode(0o766); println!( "Setting execute permissions on {}", executable_path.to_string_lossy() ); fs::set_permissions(&executable_path, perm)?; } println!( "Executing native program at {}", executable_path.to_string_lossy() ); // workspace root const EXECUTE_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi"); let mut native_command = Command::new(&executable_path) .current_dir(EXECUTE_DIR) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .unwrap(); if let Some(stdin_str) = &options.stdin { write!(native_command.stdin.as_ref().unwrap(), "{}", stdin_str).unwrap(); } let result = native_command .wait() .expect("Failed to execute native program"); let stdout_str = { let mut stdout = native_command.stdout.unwrap(); let mut s = String::new(); stdout.read_to_string(&mut s).unwrap(); s }; let stderr_str = { let mut stderr = native_command.stderr.unwrap(); let mut s = String::new(); stderr.read_to_string(&mut s).unwrap(); s }; if !result.success() { println!("NATIVE PROGRAM FAILED"); println!("stdout:\n{}", stdout_str); eprintln!("stderr:\n{}", stderr_str); } let result = result.code().unwrap() as i64; Ok(NativeOutput { stdout: stdout_str, stderr: stderr_str, result, }) } /// compile the Wasm file for the given version of WASI /// /// returns the path of where the wasm file is fn compile_wasm_for_version( temp_dir: &Path, file: &str, out_dir: &Path, rs_mod_name: &str, version: WasiVersion, ) -> io::Result<PathBuf> { //let out_dir = base_dir; //base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(out_dir)?; } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name); wasm_out_name.set_extension("wasm"); wasm_out_name }; println!("Reading contents from file `{}`", file); let file_contents: String = { let mut fc = String::new(); let mut f = fs::OpenOptions::new().read(true).open(file)?; f.read_to_string(&mut fc)?; fc }; let temp_wasi_rs_file_name = 
temp_dir.join(format!("wasi_modified_version_{}.rs", rs_mod_name)); { let mut actual_file = fs::OpenOptions::new() .write(true) .truncate(true) .create(true) .open(&temp_wasi_rs_file_name) .unwrap(); actual_file.write_all(file_contents.as_bytes()).unwrap(); } println!( "Compiling wasm module `{}` with toolchain `{}`", &wasm_out_name.to_string_lossy(), version.get_compiler_toolchain() ); let mut command = Command::new("rustc"); command .arg(format!("+{}", version.get_compiler_toolchain())) .arg("--target=wasm32-wasi") .arg("-C") .arg("opt-level=z") .arg(&temp_wasi_rs_file_name) .arg("-o") .arg(&wasm_out_name); println!("Command {:?}", command); let wasm_compilation_out = command.output().expect("Failed to compile program to wasm"); util::print_info_on_error(&wasm_compilation_out, "WASM COMPILATION"); println!( "Removing file `{}`", &temp_wasi_rs_file_name.to_string_lossy() ); // to prevent commiting huge binary blobs forever let wasm_strip_out = Command::new("wasm-strip") .arg(&wasm_out_name) .output() .expect("Failed to strip compiled wasm module"); util::print_info_on_error(&wasm_strip_out, "STRIPPING WASM"); let wasm_opt_out = Command::new("wasm-opt") .arg("-Oz") .arg(&wasm_out_name) .arg("-o") .arg(&wasm_out_name) .output() .expect("Failed to optimize compiled wasm module with wasm-opt!"); util::print_info_on_error(&wasm_opt_out, "OPTIMIZING WASM"); Ok(wasm_out_name) } /// Returns the a Vec of the test modules created fn compile(temp_dir: &Path, file: &str, wasi_versions: &[WasiVersion]) { let src_code: String = fs::read_to_string(file).unwrap(); let options: WasiOptions = extract_args_from_source_file(&src_code).unwrap_or_default(); assert!(file.ends_with(".rs")); let rs_mod_name = { Path::new(&file.to_lowercase()) .file_stem() .unwrap() .to_string_lossy() .to_string() }; let base_dir = Path::new(file).parent().unwrap(); let NativeOutput { stdout, stderr, result, } = generate_native_output(temp_dir, file, &rs_mod_name, &options.args, &options) .expect("Generate native output"); let test = WasiTest { wasm_prog_name: format!("{}.wasm", rs_mod_name), stdout, stderr, result, options, }; let test_serialized = test.into_wasi_wast(); println!("Generated test output: {}", &test_serialized); wasi_versions .iter() .map(|&version| { let out_dir = base_dir.join("..").join(version.get_directory_name()); if !out_dir.exists() { fs::create_dir(&out_dir).unwrap(); } let wasm_out_name = { let mut wasm_out_name = out_dir.join(rs_mod_name.clone()); wasm_out_name.set_extension("wast"); wasm_out_name }; println!("Writing test output to {}", wasm_out_name.to_string_lossy()); fs::write(&wasm_out_name, test_serialized.clone()).unwrap(); println!("Compiling wasm version {:?}", version); compile_wasm_for_version(temp_dir, file, &out_dir, &rs_mod_name, version) .unwrap_or_else(|_| panic!("Could not compile Wasm to WASI version {:?}, perhaps you need to install the `{}` rust toolchain", version, version.get_compiler_toolchain())); }).for_each(drop); // Do nothing with it, but let the iterator be consumed/iterated. 
} const WASI_TEST_SRC_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/wasi/tests/*.rs"); pub fn build(wasi_versions: &[WasiVersion], specific_tests: &[&str]) { let temp_dir = tempfile::TempDir::new().unwrap(); for entry in glob(WASI_TEST_SRC_DIR).unwrap() { match entry { Ok(path) => { let test = path.to_str().unwrap(); if !specific_tests.is_empty() { if let Some(filename) = path.file_stem().and_then(|f| f.to_str()) { if specific_tests.contains(&filename) { compile(temp_dir.path(), test, wasi_versions); } } } else { compile(temp_dir.path(), test, wasi_versions); } } Err(e) => println!("{:?}", e), } } println!("All modules generated."); } /// This is the structure of the `.wast` file #[derive(Debug, Default, Serialize, Deserialize)] pub struct WasiTest { /// The name of the wasm module to run pub wasm_prog_name: String, /// The program expected output on stdout pub stdout: String, /// The program expected output on stderr pub stderr: String, /// The program expected result pub result: i64, /// The program options pub options: WasiOptions, } impl WasiTest { fn into_wasi_wast(self) -> String
}

/// The options provided when executing a WASI Wasm program
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct WasiOptions {
    /// Mapped pre-opened dirs
    pub mapdir: Vec<(String, String)>,
    /// Environment vars
    pub env: Vec<(String, String)>,
    /// Program arguments
    pub args: Vec<String>,
    /// Pre-opened directories
    pub dir: Vec<String>,
    /// The alias of the temporary directory to use
    pub tempdir: Vec<String>,
    /// Stdin to give to the native program and WASI program.
    pub stdin: Option<String>,
}

/// Pulls args to the program out of a comment at the top of the file starting with "// WASI:"
fn extract_args_from_source_file(source_code: &str) -> Option<WasiOptions> {
    if source_code.starts_with("// WASI:") {
        let mut args = WasiOptions::default();
        for arg_line in source_code
            .lines()
            .skip(1)
            .take_while(|line| line.starts_with("// "))
        {
            let arg_line = arg_line.strip_prefix("// ").unwrap();
            let arg_line = arg_line.trim();
            let colon_idx = arg_line
                .find(':')
                .expect("directives provided at the top must be separated by a `:`");
            let (command_name, value) = arg_line.split_at(colon_idx);
            let value = value.strip_prefix(':').unwrap();
            let value = value.trim();
            match command_name {
                "mapdir" =>
                // We try first splitting by `::`
                {
                    if let [alias, real_dir] = value.split("::").collect::<Vec<&str>>()[..] {
                        args.mapdir.push((alias.to_string(), real_dir.to_string()));
                    } else if let [alias, real_dir] = value.split(':').collect::<Vec<&str>>()[..] {
                        // And then we try splitting by `:` (for compatibility with previous API)
                        args.mapdir.push((alias.to_string(), real_dir.to_string()));
                    } else {
                        eprintln!("Parse error in mapdir {} not parsed correctly", value);
                    }
                }
                "env" => {
                    if let [name, val] = value.split('=').collect::<Vec<&str>>()[..] {
                        args.env.push((name.to_string(), val.to_string()));
                    } else {
                        eprintln!("Parse error in env {} not parsed correctly", value);
                    }
                }
                "dir" => {
                    args.dir.push(value.to_string());
                }
                "arg" => {
                    args.args.push(value.to_string());
                }
                "tempdir" => {
                    args.tempdir.push(value.to_string());
                }
                "stdin" => {
                    assert!(args.stdin.is_none(), "Only the first `stdin` directive is used! Please correct this or update this code");
                    let s = value;
                    let s = s.strip_prefix('"').expect("expected leading '\"' in stdin");
                    let s = s
                        .trim_end()
                        .strip_suffix('\"')
                        .expect("expected trailing '\"' in stdin");
                    args.stdin = Some(s.to_string());
                }
                e => {
                    eprintln!("WARN: comment arg: `{}` is not supported", e);
                }
            }
        }
        return Some(args);
    }
    None
}
{ use std::fmt::Write; let mut out = format!( ";; This file was generated by https://github.com/wasmerio/wasi-tests\n (wasi_test \"{}\"", self.wasm_prog_name ); if !self.options.env.is_empty() { let envs = self .options .env .iter() .map(|(name, value)| format!("\"{}={}\"", name, value)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (envs {})", envs); } if !self.options.args.is_empty() { let args = self .options .args .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (args {})", args); } if !self.options.dir.is_empty() { let preopens = self .options .dir .iter() .map(|v| format!("\"{}\"", v)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (preopens {})", preopens); } if !self.options.mapdir.is_empty() { let map_dirs = self .options .mapdir .iter() .map(|(a, b)| format!("\"{}:{}\"", a, b)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (map_dirs {})", map_dirs); } if !self.options.tempdir.is_empty() { let temp_dirs = self .options .tempdir .iter() .map(|td| format!("\"{}\"", td)) .collect::<Vec<String>>() .join(" "); let _ = write!(out, "\n (temp_dirs {})", temp_dirs); } let _ = write!(out, "\n (assert_return (i64.const {}))", self.result); if let Some(stdin) = &self.options.stdin { let _ = write!(out, "\n (stdin {:?})", stdin); } if !self.stdout.is_empty() { let _ = write!(out, "\n (assert_stdout {:?})", self.stdout); } if !self.stderr.is_empty() { let _ = write!(out, "\n (assert_stderr {:?})", self.stderr); } let _ = write!(out, "\n)\n"); out }
identifier_body
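For orientation, here is a minimal sketch of the kind of test source file this generator consumes. The leading `// WASI:` marker and the `env`, `arg`, `mapdir`, `dir`, `tempdir`, and `stdin` directive names are the ones recognized by `extract_args_from_source_file` above; the file name, directive values, and test body are invented for illustration only.

// WASI:
// env: DOG=fido
// arg: --verbose
// mapdir: sandbox::wasi/test_fs/sandbox
// dir: .
// stdin: "hello"

// Hypothetical test body (assumption for illustration): the generator compiles this
// file to a native binary and to wasm32-wasi, records stdout/stderr and the exit code,
// and serializes them with `into_wasi_wast` into a `(wasi_test ...)` s-expression along
// the lines of `(wasi_test "example.wasm" (envs "DOG=fido") (args "--verbose") ...
// (assert_return (i64.const 0)) (stdin "hello") (assert_stdout "..."))`.
use std::io::Read;

fn main() {
    let mut input = String::new();
    std::io::stdin().read_to_string(&mut input).unwrap();
    println!("DOG is {} and stdin was {}", std::env::var("DOG").unwrap(), input.trim());
}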
biology.js
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ $.fn.randomize = function(selector){ (selector ? this.find(selector) : this).parent().each(function(){ $(this).children(selector).sort(function(){ return Math.random() - 0.5; }).detach().appendTo(this); }); return this; }; var currentOption = 0; var currentAnimal; var correctAnswers = [ [ 0, 0, 2, 1, 3, 2, 2 ], [ 3, 0, 1, 3, 2, 2, 2 ], [ 0, 1, 3, 2, 0, 0, 0 ] ]; var classifications = [ "Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species" ]; var descriptions = [ "This meat&#8209;eating organism spends a great deal of time alone on the ice. Its coat of white fur keeps it warm when it's in the water. Its length, measured from the tip of its nose, down its spine, and to the end of its short tail, is seven feet. It uses its five-digit, non-retractable claws to kill its prey.", "Each one of the small greenish&#8209;yellow flowers on this photosynthesizing organism has three sepals and three petals as well as a pistil and a single stamen that are fused together. One of three petals of each flower forms a wide lip. The veins in the two leaves that grow from the base run parallel to each other. This species is found in North America and Eurasia.", "This sea cucumber captures food on its 10 branching tentacles, which it then wipes off in its mouth. Waste is ejected from its anus. It has five-part symmetry, a flat underside, and three rows of tube feet. It does not have a backbone but it does have fleshy skin with low papillae (nipple-like projections) and embedded calcium carbonate crystals." ]; var options = [ [ "Animalia: Kingdom for heterotrophs (organisms that ingest others for food). They generally have multiple cells.", "Bacteria: Kingdom for organisms with a single cell that have no nucleus.", "Fungi: Kingdom for organisms that absorb nutrients for energy. They may have one or more cells.", "Plantae: Kingdom for autotrophs - organisms that use photosynthesis to make their own food. They usually have multiple cells.", "Protoctista: Kingdom for any organism that does not fit the other kingdoms." ], [ [ "Chordata: Phylum for animals with a notochord (a rodlike structure) at some stage of development that sometimes develops into a backbone.", "Echinodermata: Phylum for animals with five-part symmetry and an internal skeleton made from calcium carbonate.", "Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.", "Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks." ], [ "Magnoliophyta: Phylum for plants that produce flowers and seeds.", "Pinophyta: Phylum for cone-bearing plants, mostly trees.", "Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.", "Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs." ] ], [ [ "Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.", "Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.", "Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.", "Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all." 
], [ "Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).", "Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)." ], [ "Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.", "Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.", "Echinoidea: Class for echinoderms with a hard shell covered by spines.", "Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft." ] ], [ [ "Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.", "Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.", "Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.", "Primates: Order for mammals with opposable thumbs and hands that are able to grasp." ], [ "Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.", "Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).", "Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.", "Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable." ], [ "Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.", "Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.", "Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.", "Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail." ] ], [ [ "Felidae: Family for carnivores that have retractable claws and can either purr or roar.", "Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.", "Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.", "Ursidae: Family for small-to-large mammals with large ears and short tails." ], [ "Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.", "Agavaceae: Family for Asparagales plants with large flowers and six stamens.", "Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.", "Iridaceae: Family for Asparagales plants with small to large flowers and three stamens." ], [ "Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.", "Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.", "Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.", "Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet." 
] ], [ [ "Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.", "Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.", "Ursus: Genus for bears whose fur is typically uniform in color.", "Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit." ], [ "Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).", "Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.", "Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.", "Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base." ], [ "Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.", "Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.", "Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.", "Thyone: Genus for sea cucumbers with tube feet scattered equally over the body." ] ], [ [ "arctos: A large bear known for its brown coat. It eats mostly vegetation.", "americanus: A medium to large bear known for its typically black or dark brown coat.", "maritimus: A large, aquatic bear that has adapted to a cold climate.", "ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in." ], [ "hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.", "liliifolia: A species of wide-lipped orchid with brown flowers.", "loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.", "vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America." ], [ "anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.", "australis: A species of sea cucumbers with a squarish body that are grey to orange in color.", "crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.", "quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies." ] ] ]; function setOptions(i, k) { if(k === undefined) k = 0; if(i > 6) throw "We use the six-kingdom system here"; $("#classifying").text(classifications[i]); for(var j = 1; j < 6; j++) { var option; if(i < 1) option = options[i][j-1]; else { if(k === 2 && i === 1) k = 0; option = options[i][k][j-1]; } if(option === undefined)
else { $("#option-" + j).show(); $("#label-" + j).text(option); } } $("#sets")[0].scrollTop = 0; $("#sets").randomize("label"); $("input[name=set]:checked").removeProp("checked"); } function goBack() { $("#content").hide(); $("#animal-selector").show(); $("#toolbar").hide(); } $(window).load(function() { goBack(); $("#check-button").click(function() { var id = parseInt($("input[name=set]:checked").val()); if(isNaN(id) || id > 5 || id < 1) return; id--; console.log("Current animal: " + currentAnimal + " option: " + currentOption); console.log("Id: " + id); console.log("Cid: " + correctAnswers[currentAnimal][currentOption]); if(correctAnswers[currentAnimal][currentOption] !== id) { $("#wrong-dialog").dialog({ modal: true }); console.log("NO!"); return; } else { $("#correct-dialog").dialog({ modal: true }); } ++currentOption; if(currentOption <= 6) setOptions(currentOption, currentAnimal); else { goBack(); } }); $("#select-button").click(function() { var $selected = $("input[name=animal]:checked"); currentAnimal = parseInt($selected.val()); if(isNaN(currentAnimal) || currentAnimal > 5 || currentAnimal < 0) return; $("#animal").attr("src", $selected.parent().find("img").attr("src")); $("#animal-selector").hide(); $(".description").html(descriptions[currentAnimal]); currentOption = 0; setOptions(currentOption, currentAnimal); $("#content").show(); $("#toolbar").show(); }); });
{ $("#option-" + j).hide(); }
conditional_block
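To make the data layout above concrete, here is a worked lookup (added for illustration; it is not part of the original file) showing how the first answer key in `correctAnswers` resolves through the nested `options` array. For every level below Kingdom, `setOptions` reads `options[i][k]`, where `k` is the selected organism's index (with `k === 2` remapped to `0` at the Phylum level, since the two animal organisms share one phylum list).

// Worked example for organism 0 (the ice-dwelling carnivore description).
// correctAnswers[0] = [0, 0, 2, 1, 3, 2, 2] resolves to:
var k = 0; // organism index
console.log(options[0][0]);    // "Animalia..."  (Kingdom)
console.log(options[1][k][0]); // "Chordata..."  (Phylum)
console.log(options[2][k][2]); // "Mammalia..."  (Class)
console.log(options[3][k][1]); // "Carnivora..." (Order)
console.log(options[4][k][3]); // "Ursidae..."   (Family)
console.log(options[5][k][2]); // "Ursus..."     (Genus)
console.log(options[6][k][2]); // "maritimus..." (Species) - i.e. the polar bear, Ursus maritimus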
biology.js
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ $.fn.randomize = function(selector){ (selector ? this.find(selector) : this).parent().each(function(){ $(this).children(selector).sort(function(){ return Math.random() - 0.5; }).detach().appendTo(this); }); return this; }; var currentOption = 0; var currentAnimal; var correctAnswers = [ [ 0, 0, 2, 1, 3, 2, 2 ], [ 3, 0, 1, 3, 2, 2, 2 ], [ 0, 1, 3, 2, 0, 0, 0 ] ]; var classifications = [ "Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species" ]; var descriptions = [ "This meat&#8209;eating organism spends a great deal of time alone on the ice. Its coat of white fur keeps it warm when it's in the water. Its length, measured from the tip of its nose, down its spine, and to the end of its short tail, is seven feet. It uses its five-digit, non-retractable claws to kill its prey.", "Each one of the small greenish&#8209;yellow flowers on this photosynthesizing organism has three sepals and three petals as well as a pistil and a single stamen that are fused together. One of three petals of each flower forms a wide lip. The veins in the two leaves that grow from the base run parallel to each other. This species is found in North America and Eurasia.", "This sea cucumber captures food on its 10 branching tentacles, which it then wipes off in its mouth. Waste is ejected from its anus. It has five-part symmetry, a flat underside, and three rows of tube feet. It does not have a backbone but it does have fleshy skin with low papillae (nipple-like projections) and embedded calcium carbonate crystals." ]; var options = [ [ "Animalia: Kingdom for heterotrophs (organisms that ingest others for food). They generally have multiple cells.", "Bacteria: Kingdom for organisms with a single cell that have no nucleus.", "Fungi: Kingdom for organisms that absorb nutrients for energy. They may have one or more cells.", "Plantae: Kingdom for autotrophs - organisms that use photosynthesis to make their own food. They usually have multiple cells.", "Protoctista: Kingdom for any organism that does not fit the other kingdoms." ], [ [ "Chordata: Phylum for animals with a notochord (a rodlike structure) at some stage of development that sometimes develops into a backbone.", "Echinodermata: Phylum for animals with five-part symmetry and an internal skeleton made from calcium carbonate.", "Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.", "Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks." ], [ "Magnoliophyta: Phylum for plants that produce flowers and seeds.", "Pinophyta: Phylum for cone-bearing plants, mostly trees.", "Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.", "Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs." ] ], [ [ "Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.", "Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.", "Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.", "Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all." 
], [ "Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).", "Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)." ], [ "Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.", "Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.", "Echinoidea: Class for echinoderms with a hard shell covered by spines.", "Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft." ] ], [ [ "Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.", "Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.", "Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.", "Primates: Order for mammals with opposable thumbs and hands that are able to grasp." ], [ "Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.", "Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).", "Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.", "Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable." ], [ "Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.", "Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.", "Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.", "Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail." ] ], [ [ "Felidae: Family for carnivores that have retractable claws and can either purr or roar.", "Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.", "Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.", "Ursidae: Family for small-to-large mammals with large ears and short tails." ], [ "Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.", "Agavaceae: Family for Asparagales plants with large flowers and six stamens.", "Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.", "Iridaceae: Family for Asparagales plants with small to large flowers and three stamens." ], [ "Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.", "Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.", "Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.", "Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet." 
] ], [ [ "Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.", "Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.", "Ursus: Genus for bears whose fur is typically uniform in color.", "Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit." ], [ "Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).", "Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.", "Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.", "Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base." ], [ "Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.", "Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.", "Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.", "Thyone: Genus for sea cucumbers with tube feet scattered equally over the body." ] ], [ [ "arctos: A large bear known for its brown coat. It eats mostly vegetation.", "americanus: A medium to large bear known for its typically black or dark brown coat.", "maritimus: A large, aquatic bear that has adapted to a cold climate.", "ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in." ], [ "hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.", "liliifolia: A species of wide-lipped orchid with brown flowers.", "loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.", "vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America." ], [ "anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.", "australis: A species of sea cucumbers with a squarish body that are grey to orange in color.", "crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.",
] ]; function setOptions(i, k) { if(k === undefined) k = 0; if(i > 6) throw "We use the six-kingdom system here"; $("#classifying").text(classifications[i]); for(var j = 1; j < 6; j++) { var option; if(i < 1) option = options[i][j-1]; else { if(k === 2 && i === 1) k = 0; option = options[i][k][j-1]; } if(option === undefined) { $("#option-" + j).hide(); } else { $("#option-" + j).show(); $("#label-" + j).text(option); } } $("#sets")[0].scrollTop = 0; $("#sets").randomize("label"); $("input[name=set]:checked").removeProp("checked"); } function goBack() { $("#content").hide(); $("#animal-selector").show(); $("#toolbar").hide(); } $(window).load(function() { goBack(); $("#check-button").click(function() { var id = parseInt($("input[name=set]:checked").val()); if(isNaN(id) || id > 5 || id < 1) return; id--; console.log("Current animal: " + currentAnimal + " option: " + currentOption); console.log("Id: " + id); console.log("Cid: " + correctAnswers[currentAnimal][currentOption]); if(correctAnswers[currentAnimal][currentOption] !== id) { $("#wrong-dialog").dialog({ modal: true }); console.log("NO!"); return; } else { $("#correct-dialog").dialog({ modal: true }); } ++currentOption; if(currentOption <= 6) setOptions(currentOption, currentAnimal); else { goBack(); } }); $("#select-button").click(function() { var $selected = $("input[name=animal]:checked"); currentAnimal = parseInt($selected.val()); if(isNaN(currentAnimal) || currentAnimal > 5 || currentAnimal < 0) return; $("#animal").attr("src", $selected.parent().find("img").attr("src")); $("#animal-selector").hide(); $(".description").html(descriptions[currentAnimal]); currentOption = 0; setOptions(currentOption, currentAnimal); $("#content").show(); $("#toolbar").show(); }); });
"quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies." ]
random_line_split
biology.js
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ $.fn.randomize = function(selector){ (selector ? this.find(selector) : this).parent().each(function(){ $(this).children(selector).sort(function(){ return Math.random() - 0.5; }).detach().appendTo(this); }); return this; }; var currentOption = 0; var currentAnimal; var correctAnswers = [ [ 0, 0, 2, 1, 3, 2, 2 ], [ 3, 0, 1, 3, 2, 2, 2 ], [ 0, 1, 3, 2, 0, 0, 0 ] ]; var classifications = [ "Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species" ]; var descriptions = [ "This meat&#8209;eating organism spends a great deal of time alone on the ice. Its coat of white fur keeps it warm when it's in the water. Its length, measured from the tip of its nose, down its spine, and to the end of its short tail, is seven feet. It uses its five-digit, non-retractable claws to kill its prey.", "Each one of the small greenish&#8209;yellow flowers on this photosynthesizing organism has three sepals and three petals as well as a pistil and a single stamen that are fused together. One of three petals of each flower forms a wide lip. The veins in the two leaves that grow from the base run parallel to each other. This species is found in North America and Eurasia.", "This sea cucumber captures food on its 10 branching tentacles, which it then wipes off in its mouth. Waste is ejected from its anus. It has five-part symmetry, a flat underside, and three rows of tube feet. It does not have a backbone but it does have fleshy skin with low papillae (nipple-like projections) and embedded calcium carbonate crystals." ]; var options = [ [ "Animalia: Kingdom for heterotrophs (organisms that ingest others for food). They generally have multiple cells.", "Bacteria: Kingdom for organisms with a single cell that have no nucleus.", "Fungi: Kingdom for organisms that absorb nutrients for energy. They may have one or more cells.", "Plantae: Kingdom for autotrophs - organisms that use photosynthesis to make their own food. They usually have multiple cells.", "Protoctista: Kingdom for any organism that does not fit the other kingdoms." ], [ [ "Chordata: Phylum for animals with a notochord (a rodlike structure) at some stage of development that sometimes develops into a backbone.", "Echinodermata: Phylum for animals with five-part symmetry and an internal skeleton made from calcium carbonate.", "Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.", "Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks." ], [ "Magnoliophyta: Phylum for plants that produce flowers and seeds.", "Pinophyta: Phylum for cone-bearing plants, mostly trees.", "Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.", "Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs." ] ], [ [ "Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.", "Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.", "Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.", "Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all." 
], [ "Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).", "Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)." ], [ "Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.", "Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.", "Echinoidea: Class for echinoderms with a hard shell covered by spines.", "Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft." ] ], [ [ "Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.", "Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.", "Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.", "Primates: Order for mammals with opposable thumbs and hands that are able to grasp." ], [ "Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.", "Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).", "Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.", "Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable." ], [ "Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.", "Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.", "Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.", "Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail." ] ], [ [ "Felidae: Family for carnivores that have retractable claws and can either purr or roar.", "Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.", "Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.", "Ursidae: Family for small-to-large mammals with large ears and short tails." ], [ "Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.", "Agavaceae: Family for Asparagales plants with large flowers and six stamens.", "Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.", "Iridaceae: Family for Asparagales plants with small to large flowers and three stamens." ], [ "Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.", "Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.", "Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.", "Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet." 
] ], [ [ "Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.", "Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.", "Ursus: Genus for bears whose fur is typically uniform in color.", "Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit." ], [ "Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).", "Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.", "Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.", "Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base." ], [ "Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.", "Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.", "Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.", "Thyone: Genus for sea cucumbers with tube feet scattered equally over the body." ] ], [ [ "arctos: A large bear known for its brown coat. It eats mostly vegetation.", "americanus: A medium to large bear known for its typically black or dark brown coat.", "maritimus: A large, aquatic bear that has adapted to a cold climate.", "ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in." ], [ "hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.", "liliifolia: A species of wide-lipped orchid with brown flowers.", "loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.", "vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America." ], [ "anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.", "australis: A species of sea cucumbers with a squarish body that are grey to orange in color.", "crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.", "quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies." ] ] ]; function
(i, k) { if(k === undefined) k = 0; if(i > 6) throw "We use the six-kingdom system here"; $("#classifying").text(classifications[i]); for(var j = 1; j < 6; j++) { var option; if(i < 1) option = options[i][j-1]; else { if(k === 2 && i === 1) k = 0; option = options[i][k][j-1]; } if(option === undefined) { $("#option-" + j).hide(); } else { $("#option-" + j).show(); $("#label-" + j).text(option); } } $("#sets")[0].scrollTop = 0; $("#sets").randomize("label"); $("input[name=set]:checked").removeProp("checked"); } function goBack() { $("#content").hide(); $("#animal-selector").show(); $("#toolbar").hide(); } $(window).load(function() { goBack(); $("#check-button").click(function() { var id = parseInt($("input[name=set]:checked").val()); if(isNaN(id) || id > 5 || id < 1) return; id--; console.log("Current animal: " + currentAnimal + " option: " + currentOption); console.log("Id: " + id); console.log("Cid: " + correctAnswers[currentAnimal][currentOption]); if(correctAnswers[currentAnimal][currentOption] !== id) { $("#wrong-dialog").dialog({ modal: true }); console.log("NO!"); return; } else { $("#correct-dialog").dialog({ modal: true }); } ++currentOption; if(currentOption <= 6) setOptions(currentOption, currentAnimal); else { goBack(); } }); $("#select-button").click(function() { var $selected = $("input[name=animal]:checked"); currentAnimal = parseInt($selected.val()); if(isNaN(currentAnimal) || currentAnimal > 5 || currentAnimal < 0) return; $("#animal").attr("src", $selected.parent().find("img").attr("src")); $("#animal-selector").hide(); $(".description").html(descriptions[currentAnimal]); currentOption = 0; setOptions(currentOption, currentAnimal); $("#content").show(); $("#toolbar").show(); }); });
setOptions
identifier_name
biology.js
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ $.fn.randomize = function(selector){ (selector ? this.find(selector) : this).parent().each(function(){ $(this).children(selector).sort(function(){ return Math.random() - 0.5; }).detach().appendTo(this); }); return this; }; var currentOption = 0; var currentAnimal; var correctAnswers = [ [ 0, 0, 2, 1, 3, 2, 2 ], [ 3, 0, 1, 3, 2, 2, 2 ], [ 0, 1, 3, 2, 0, 0, 0 ] ]; var classifications = [ "Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species" ]; var descriptions = [ "This meat&#8209;eating organism spends a great deal of time alone on the ice. Its coat of white fur keeps it warm when it's in the water. Its length, measured from the tip of its nose, down its spine, and to the end of its short tail, is seven feet. It uses its five-digit, non-retractable claws to kill its prey.", "Each one of the small greenish&#8209;yellow flowers on this photosynthesizing organism has three sepals and three petals as well as a pistil and a single stamen that are fused together. One of three petals of each flower forms a wide lip. The veins in the two leaves that grow from the base run parallel to each other. This species is found in North America and Eurasia.", "This sea cucumber captures food on its 10 branching tentacles, which it then wipes off in its mouth. Waste is ejected from its anus. It has five-part symmetry, a flat underside, and three rows of tube feet. It does not have a backbone but it does have fleshy skin with low papillae (nipple-like projections) and embedded calcium carbonate crystals." ]; var options = [ [ "Animalia: Kingdom for heterotrophs (organisms that ingest others for food). They generally have multiple cells.", "Bacteria: Kingdom for organisms with a single cell that have no nucleus.", "Fungi: Kingdom for organisms that absorb nutrients for energy. They may have one or more cells.", "Plantae: Kingdom for autotrophs - organisms that use photosynthesis to make their own food. They usually have multiple cells.", "Protoctista: Kingdom for any organism that does not fit the other kingdoms." ], [ [ "Chordata: Phylum for animals with a notochord (a rodlike structure) at some stage of development that sometimes develops into a backbone.", "Echinodermata: Phylum for animals with five-part symmetry and an internal skeleton made from calcium carbonate.", "Arthropoda: Phylum for segmented animals consisting of a head, thorax, and abdomen. Their bodies are covered with an exoskeleton.", "Crustacea: Phylum for segmented animals with 18 to 20 segments, two pairs of antennae, and compound eyes that are usually on stalks." ], [ "Magnoliophyta: Phylum for plants that produce flowers and seeds.", "Pinophyta: Phylum for cone-bearing plants, mostly trees.", "Lycopodiophyta: Phylum for evergreen plants that include club and spike mosses. These plants do not produce flowers.", "Equisetophyta: Phylum for plants that have hollow, jointed stems with rough ribs." ] ], [ [ "Ascidiaceae: Class for cold-blooded marine animals that have neither a brain nor a skull and live inside a sac.", "Aves: Class for warm-blooded animals with beaks and light bones that are hollow in areas.", "Mammalia: Class for warm-blooded animals covered with fur or skin that may grow hair. The females have mammary glands.", "Reptilia: Class for cold-blooded animals with scaly skin and either short legs or no legs at all." 
], [ "Magnoliopsida: Class for plants that sprout with two leaves. Veins in their leaves have a branching structure (dicots).", "Liliopsida: Class for plants that sprout with one leaf. Veins in their leaves are typically parallel to each other (monocots)." ], [ "Asteroidea: Class for echinoderms that are often shaped like a star, though they can be nearly circular in shape.", "Crinoidea: Class for echinoderms that can have the appearance of a simple bush with a stalk that attaches to the seafloor.", "Echinoidea: Class for echinoderms with a hard shell covered by spines.", "Holothuroidea: Class for echinoderms that are shaped like a cylinder and have a mouth at one end and an anus at the other end. Their outer surface is soft." ] ], [ [ "Artiodactyla: Order for two or four toed mammals that are usually found in groups or herds.", "Carnivora: Order for meat-eating mammals. Some supplement their diet with fruits, plants, and insects.", "Diprotodonts: Order for mammals in which two of the four digits of their hind legs are fused together up to the base of their claws.", "Primates: Order for mammals with opposable thumbs and hands that are able to grasp." ], [ "Alismatales: Order for Liliopsida plants that live submerged or mostly submerged in freshwater and marine environments. The flowers are pollinated by wind or water.", "Poales: Order for grass-like Liliopsida plants with green sepals and petals that are bract-like (look like leaves or scales).", "Liliales: Order for Liliopsida plants with flowers that have three sepals and three petals that are so similar that they cannot be distinguished from one another.", "Asparagales: Order for Liliopsida plants with flowers in which the sepals and petals are often distinguishable." ], [ "Apodida: Order for worm-like sea cucumbers that lack tube feet and have a thin outer covering that is often transparent.", "Aspidochirotida: Order for sea cucumbers with 15 to 30 short tentacles that are shaped like shields or mops.", "Oendrochirotida: Order for sea cucumbers with 10 to 30 highly branched tentacles.", "Molpadiida: Order for sea cucumbers with 15 short, stubby tentacles and no tube feet. Bodies taper toward the anus, forming a tail." ] ], [ [ "Felidae: Family for carnivores that have retractable claws and can either purr or roar.", "Mustelidae: Family for carnivores that typically have long tails. All of them have especially well-developed anal glands.", "Procyonidae: Family for small to medium sized mammals with short to long tails. They are found only from Canada to Argentina.", "Ursidae: Family for small-to-large mammals with large ears and short tails." ], [ "Alliaceae: Family for Asparagales plants with an onion-like odor, small flowers, and six stamens.", "Agavaceae: Family for Asparagales plants with large flowers and six stamens.", "Orchidaceae: Family for Asparagales plants with small to large flowers and one stamen.", "Iridaceae: Family for Asparagales plants with small to large flowers and three stamens." ], [ "Cucumariidae: Family for sea cucumbers with 10 branching tentacles that are used to capture particles from the surrounding water.", "Phyllophoridae: Family for sea cucumbers with more than 10 branching tentacles. The shorter tentacles are used for cleaning.", "Psolidae: Family for sea cucumbers covered on the top side by plates made of calcium carbonate.", "Sclerodactylidae: Family for sea cucumbers with 10 to 20 tentacles and scattered tube feet." 
] ], [ [ "Melursus: Genus for bears with long narrow snouts, which look similar to an anteater's snout. They have small teeth and no incisors.", "Helarctos: Genus for small bears that stand only about 30 inches to the shoulder.", "Ursus: Genus for bears whose fur is typically uniform in color.", "Tremarctos: Genus for large bears with white fur circling or almost circling their eyes. They feed mostly on fruit." ], [ "Goodyera: Genus for long-stemmed orchids with small flowers. Within each flower's lip is a single patch of 'hair' (papillae).", "Platanthera: Genus for orchids with green, white, or yellow flowers that have a small lobed or fringed lip.", "Liparis: Genus for an orchid with one to a few leaves at its base and very small flowers. Each flower has a wide lip.", "Spathoglottis: Genus for flowers with a lip that has a callous-like growth near its base." ], [ "Pentacta: Genus for sea cucumbers with a flat underside and three distinct rows of tube feet, a firm body wall, and low papillae on the dorsal side.", "Pseudocolochirus: Genus for sea cucumbers with three rows of tube feet on the bottom and large obvious papillae on the top.", "Stolus: Genus for small sea cucumbers with tube feet that are distributed throughout the body.", "Thyone: Genus for sea cucumbers with tube feet scattered equally over the body." ] ], [ [ "arctos: A large bear known for its brown coat. It eats mostly vegetation.", "americanus: A medium to large bear known for its typically black or dark brown coat.", "maritimus: A large, aquatic bear that has adapted to a cold climate.", "ursinus: A small to medium bear with black fur, though sometimes with gray and brown fur mixed in." ], [ "hawaiensis: A species of wide-lipped orchid with green flowers that grows only in Hawaii.", "liliifolia: A species of wide-lipped orchid with brown flowers.", "loeselii: A species of wide-lipped orchid with green or greenish-yellow flowers.", "vexillifera: A species of wide-lipped orchid found in the Caribbean and Central and South America." ], [ "anceps: A species of yellow and pink sea cucumbers, with stiff, fleshy skin, low papillae on the body, and prominent papillae near the anus.", "australis: A species of sea cucumbers with a squarish body that are grey to orange in color.", "crassa: A species of grey sea cucumbers with a pink underside that are typically found on mud.", "quadrangularis: A species for grey sea cucumbers with prominent tapering papillae along the corners of their squarish bodies." ] ] ]; function setOptions(i, k)
function goBack() { $("#content").hide(); $("#animal-selector").show(); $("#toolbar").hide(); } $(window).load(function() { goBack(); $("#check-button").click(function() { var id = parseInt($("input[name=set]:checked").val()); if(isNaN(id) || id > 5 || id < 1) return; id--; console.log("Current animal: " + currentAnimal + " option: " + currentOption); console.log("Id: " + id); console.log("Cid: " + correctAnswers[currentAnimal][currentOption]); if(correctAnswers[currentAnimal][currentOption] !== id) { $("#wrong-dialog").dialog({ modal: true }); console.log("NO!"); return; } else { $("#correct-dialog").dialog({ modal: true }); } ++currentOption; if(currentOption <= 6) setOptions(currentOption, currentAnimal); else { goBack(); } }); $("#select-button").click(function() { var $selected = $("input[name=animal]:checked"); currentAnimal = parseInt($selected.val()); if(isNaN(currentAnimal) || currentAnimal > 5 || currentAnimal < 0) return; $("#animal").attr("src", $selected.parent().find("img").attr("src")); $("#animal-selector").hide(); $(".description").html(descriptions[currentAnimal]); currentOption = 0; setOptions(currentOption, currentAnimal); $("#content").show(); $("#toolbar").show(); }); });
{ if(k === undefined) k = 0; if(i > 6) throw "We use the six-kingdom system here"; $("#classifying").text(classifications[i]); for(var j = 1; j < 6; j++) { var option; if(i < 1) option = options[i][j-1]; else { if(k === 2 && i === 1) k = 0; option = options[i][k][j-1]; } if(option === undefined) { $("#option-" + j).hide(); } else { $("#option-" + j).show(); $("#label-" + j).text(option); } } $("#sets")[0].scrollTop = 0; $("#sets").randomize("label"); $("input[name=set]:checked").removeProp("checked"); }
identifier_body
app.js
/* * Register angular module with custom name myapp, all other Angular objects will add it to this custom angular module, * Here Other Anulag objects used are Controller, Service, RouteProvider etc. **/ // TODO: standardize module and function descriptions, maybe using jsdoc 'use strict'; var myapp = angular.module('myapp', [ 'angular.css.injector', 'mcfp.Overlay', 'ngRoute', 'ngResource', 'ngAnimate', 'ngSanitize', 'ngDropdowns', 'ui.bootstrap' ]) // Description: This service is a super cache and a data management service. // It is initialized with the data we will need to load the initial view in main.js // Other data can be dynamically loaded, and everything will be cached in the inspectable oMiCache object // It is a $cacheFactory instance, so the keys can be URLs or arbitrary strings. // // use $http.get(sDataKey) to get urls or constants and MiData.put() to add constants // angular is so backwards...factories are functional and services actually invoke Object.create() (like an OOP/Java Factory) .service('MiData', ['$cacheFactory', '$http', function($cacheFactory, $http){ var _oMiData = $cacheFactory('miData'), _context = this; this._constants = { oLookingForOptions: [ {type: 'Military installation'}, {type: 'State resources'}, {type: 'Program or service'} ], arroFilterByDefault: [ {type: 'Select Installation or zip code'}, {type: 'Zip code'}, {type: 'Installation'} ], arroFilterByOptions: [ {type: 'Zip code'}, {type: 'Installation'} ], arroMilesOptions: [ {type: '5 miles'}, {type: '10 miles'}, {type: '25 miles'}, {type: '50 miles'}, {type: '100 miles'}, {type: '250 miles'}, {type: '500 miles'} ], arroBranches: [ { "sName": "Air Force", "bActiveFilter": false }, { "sName": "Army", "bActiveFilter": false }, { "sName": "Navy", "bActiveFilter": false }, { "sName": "Marine Corps", "bActiveFilter": false }, { "sName": "Defense Logistics Agency", "bActiveFilter": false } ], arroConus: [{ sName: 'CONUS', bActiveFilter: false },{ sName: 'OCONUS', bActiveFilter: false }], arroViewBy: [ {type: 'Zip Code'}, {type: 'Installation'} ] }; $http.defaults.cache = _oMiData; // binds a series of uniquely named constants to a controller's scope this.constantify = function(scope, arrsConstants) { arrsConstants.forEach(function(sKeyName) { if (typeof _context._constants[sKeyName] === 'object') { scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName])); } else { scope[sKeyName] = _context._constants[sKeyName]; } }); } this.get = function(sDataKey) { return new Promise(function(resolve, reject){ $http.get(sDataKey).then(function(response){ resolve(response.data); }, function(reason){ resolve({ error: 'An error occurred!' 
}); }) }); } // a syntactic sugar for mock data this.mock = function(sUriSubstring) { return this.get('/data/get-' + sUriSubstring + '.json'); } this.init = function() { $http.get('/data/get-installations.json'); $http.get('/data/get-program-cards.json'); $http.get('/data/get-programs.json'); $http.get('/data/states.json'); } }]) // ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages // description: this service allows passing data between views // TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches)); .service('MiState', function(){ var _oState = {}; function set(data) { _oState = data; } function get() { return _oState; } return { set: set, get: get } }) // description: creates a base controller // TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found. // rule of thumb: If three views need it, add it here. .service('BaseController', ['$location', 'cssInjector', 'MiData', 'MiState', '$templateCache', '$compile', '$timeout', '$window', 'Overlay', function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) { var oContext = this, oLazyLoad = { miCard: function() { myapp._compileProvider.directive('miCard', function() { return { templateUrl: '/views/miCard/miCard.html' } }) } }; function getViewName() { var sResolvedViewName = MiData.sResolvedViewName; if (sResolvedViewName) { MiData.sResolvedViewName = ''; return sResolvedViewName; } return $location.$$path.slice(1).split('/')[0] || 'main'; } function getStyle(sViewName) { cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css'); } // description: a utility method to quickly get a needed item from a list. // finds the first member of arr with a given key-value pair. // either returns a desired key's value or else the whole matched object. // can support up to 2 levels deep using subkey; objects more complex can't use this approach. function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) { var arrFiltered = arr.filter(function(el){ return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch; }) || ''; if (arrFiltered && arrFiltered[0]) { return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0]; } return ''; } function init(scope, oOptions) { var sViewName = getViewName(); scope.MI = this; MiData.init(); MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) { $compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM scope.sViewName = sViewName; getStyle(sViewName); if (oOptions) { if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify); if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope); if (oOptions.fCallback) oOptions.fCallback(scope, oOptions);
scope.$apply(); }); } // bLazyLoadingDone is needed to trigger $compile because it creates a diff // also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen. // ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1 function fLazyLoad(arrsfDirectives, scope) { arrsfDirectives.forEach(function(sfDirective){ oLazyLoad[sfDirective](); }); scope.bLazyLoadingDone = true; } return { $location: $location, Data: MiData, getStyle: getStyle, getFirstMatch: getFirstMatch, init: init, State: MiState, $window: $window, Overlay: Overlay } }]) // syntactic sugar. Maybe collapse into one service if you want, but this is modular. .service('MI', ['BaseController', function(BaseController){ return BaseController; }]) // don't lazy load these because they're needed on main/initial view .directive('miSearch', ['MI', function(MI){ return { templateUrl: 'looking-for', link: function(scope, element, attrs) { var oChangeOptions = scope.$parent.oLookingForSelectionChange; if (oChangeOptions) { scope.$watch('oLookingForSelection', oChangeOptions.fFunction, oChangeOptions.bDigest); } } } }]) // TODO: this directive needs it's own scope and data .directive('miTypeaheadPrograms', function(){ return { templateUrl: 'typeahead-programs' } }) .directive('miTypeaheadInstallations', function(){ return { templateUrl: 'typeahead-installations', link: function(scope, element, attrs) { // TODO: should MDInstallations be standard under MI.Data? // TODO: should we include a reference or element to return on click as a standard here? scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () { scope.arroInstallations = scope.MDInstallations; scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay'); } } } }) .directive('miRegionAccordions', function(){ return { restrict: 'E', templateUrl: 'region-accordions', scope: { arrdirectiveregions: '=' }, link: function(scope, element, attrs) { //ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick; // animated open indicator scope.fIndicateOpen = function ($event) { var $panelHeading = $($event.currentTarget).find('.panel-heading'), $openIndicator = $panelHeading.prev(); if (!$openIndicator.length) { // create $openIndicator if it's not there $openIndicator = $('<div class="open-indicator">'); $panelHeading.before($openIndicator); } if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case $openIndicator.animate({ width: 40 }, 300); } else { // ensure indicator is collapsed in this case $openIndicator.animate({ width: 0 }, 300); } } } } }) // TODO: delete this block // define other directives for lazy loading later on // ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-lazy-loaded-in-angularjs .config(function ($compileProvider) { myapp._compileProvider = $compileProvider; }) .config(function(cssInjectorProvider){ cssInjectorProvider.setSinglePageMode(true); }) // ref: https://stackoverflow.com/questions/20836374/how-to-catch-angular-ng-include-error // ref: https://stackoverflow.com/questions/19711550/angularjs-how-to-prevent-a-request .config(function ($httpProvider) { $httpProvider.interceptors.push('templateInterceptor'); }) .factory('templateInterceptor', function($q) { return { 
'request': function(config) { var canceler = $q.defer(); config.timeout = canceler.promise; if (config.url.slice(-1) === '-') { // this is an ng-include call without valid url. Don't execute call. canceler.resolve(); } return config; } } }) .config(function ($routeProvider) { $routeProvider .when('/', { templateUrl: 'views/main/main.html', controller: 'mainController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/milinstall/:sInstallationId', { templateUrl: 'views/milinstall/milinstall.html', controller: 'miController' }) .when('/milinstall/:sInstallationId/:sCategoryId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) // yes, use the categoriesController for the topic view // if possible, don't even reload the controller on nav from category to topic .when('/milinstall/:sInstallationId/:sCategoryId/:sTopicId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/programs', { templateUrl: 'views/main/program-search.html', controller: 'ProgramsController' }) .when('/search', { templateUrl: 'views/common/common.html', controller: 'searchController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/viewall', { templateUrl: 'views/common/common.html', controller: 'viewAllController', resolve: { MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/planmymove', { headerTemplateUrl: 'views/planmymove/planmymove-header.html', templateUrl: 'views/planmymove/planmymove.html', controller: 'planMyMoveController', resolve: { MDInstallations: function (MI) { return MI.Data.mock('installations'); } } }) .otherwise({ redirectTo: '/' }); }); myapp.factory('prgdata', function () { var savedData = {} function set(data) { savedData = data; } function get() { return savedData; } return { set: set, get: get } }); //dynamically sets the header partial for PMM and MI myapp.run(['$rootScope', function ($rootScope) { $rootScope.$on('$routeChangeSuccess', function (event, current, previous) { $rootScope.headerTemplateUrl = current.$$route.headerTemplateUrl || 'views/header/header.html'; }); }]); myapp.filter('unsafe', function($sce) { return $sce.trustAsHtml; }); //typeahead filter for first character only myapp.filter('selectFirstOnlyFilter', function () { return function (input, searchString) { return input.filter(function (item) { if (item.toLowerCase().search(searchString.toLowerCase()) === 0) { return true; } return false; }) } });
}
random_line_split
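The routes in this record hand their controllers pre-resolved mock data (MI.Data.mock('programs') / mock('installations')), while MiState and prgdata are plain get/set stores for passing data between views. A minimal consumer sketch follows; 'mainController' is named in the route table above, but its body here, and the field names on the injected data, are illustrative assumptions.

```js
// Hedged sketch only: the resolve keys (MDPrograms, MDInstallations) and the MI facade
// come from the code above; the scope fields and the sId property are assumptions.
myapp.controller('mainController', ['$scope', 'MI', 'MDPrograms', 'MDInstallations',
    function($scope, MI, MDPrograms, MDInstallations) {
        // Injected by the route's resolve block, so the data is ready before the view renders.
        $scope.MDPrograms = MDPrograms;
        $scope.MDInstallations = MDInstallations;

        // Stash the selection in MiState so the next view can read it back with MI.State.get().
        $scope.fSelectInstallation = function(oInstallation) {
            MI.State.set({ oSelectedInstallation: oInstallation });
            MI.$location.path('/milinstall/' + oInstallation.sId); // sId is assumed
        };
    }
]);
```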
app.js
/* * Register angular module with custom name myapp, all other Angular objects will add it to this custom angular module, * Here Other Anulag objects used are Controller, Service, RouteProvider etc. **/ // TODO: standardize module and function descriptions, maybe using jsdoc 'use strict'; var myapp = angular.module('myapp', [ 'angular.css.injector', 'mcfp.Overlay', 'ngRoute', 'ngResource', 'ngAnimate', 'ngSanitize', 'ngDropdowns', 'ui.bootstrap' ]) // Description: This service is a super cache and a data management service. // It is initialized with the data we will need to load the initial view in main.js // Other data can be dynamically loaded, and everything will be cached in the inspectable oMiCache object // It is a $cacheFactory instance, so the keys can be URLs or arbitrary strings. // // use $http.get(sDataKey) to get urls or constants and MiData.put() to add constants // angular is so backwards...factories are functional and services actually invoke Object.create() (like an OOP/Java Factory) .service('MiData', ['$cacheFactory', '$http', function($cacheFactory, $http){ var _oMiData = $cacheFactory('miData'), _context = this; this._constants = { oLookingForOptions: [ {type: 'Military installation'}, {type: 'State resources'}, {type: 'Program or service'} ], arroFilterByDefault: [ {type: 'Select Installation or zip code'}, {type: 'Zip code'}, {type: 'Installation'} ], arroFilterByOptions: [ {type: 'Zip code'}, {type: 'Installation'} ], arroMilesOptions: [ {type: '5 miles'}, {type: '10 miles'}, {type: '25 miles'}, {type: '50 miles'}, {type: '100 miles'}, {type: '250 miles'}, {type: '500 miles'} ], arroBranches: [ { "sName": "Air Force", "bActiveFilter": false }, { "sName": "Army", "bActiveFilter": false }, { "sName": "Navy", "bActiveFilter": false }, { "sName": "Marine Corps", "bActiveFilter": false }, { "sName": "Defense Logistics Agency", "bActiveFilter": false } ], arroConus: [{ sName: 'CONUS', bActiveFilter: false },{ sName: 'OCONUS', bActiveFilter: false }], arroViewBy: [ {type: 'Zip Code'}, {type: 'Installation'} ] }; $http.defaults.cache = _oMiData; // binds a series of uniquely named constants to a controller's scope this.constantify = function(scope, arrsConstants) { arrsConstants.forEach(function(sKeyName) { if (typeof _context._constants[sKeyName] === 'object') { scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName])); } else { scope[sKeyName] = _context._constants[sKeyName]; } }); } this.get = function(sDataKey) { return new Promise(function(resolve, reject){ $http.get(sDataKey).then(function(response){ resolve(response.data); }, function(reason){ resolve({ error: 'An error occurred!' 
}); }) }); } // a syntactic sugar for mock data this.mock = function(sUriSubstring) { return this.get('/data/get-' + sUriSubstring + '.json'); } this.init = function() { $http.get('/data/get-installations.json'); $http.get('/data/get-program-cards.json'); $http.get('/data/get-programs.json'); $http.get('/data/states.json'); } }]) // ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages // description: this service allows passing data between views // TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches)); .service('MiState', function(){ var _oState = {}; function set(data) { _oState = data; } function get() { return _oState; } return { set: set, get: get } }) // description: creates a base controller // TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found. // rule of thumb: If three views need it, add it here. .service('BaseController', ['$location', 'cssInjector', 'MiData', 'MiState', '$templateCache', '$compile', '$timeout', '$window', 'Overlay', function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) { var oContext = this, oLazyLoad = { miCard: function() { myapp._compileProvider.directive('miCard', function() { return { templateUrl: '/views/miCard/miCard.html' } }) } }; function getViewName() { var sResolvedViewName = MiData.sResolvedViewName; if (sResolvedViewName) { MiData.sResolvedViewName = ''; return sResolvedViewName; } return $location.$$path.slice(1).split('/')[0] || 'main'; } function getStyle(sViewName) { cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css'); } // description: a utility method to quickly get a needed item from a list. // finds the first member of arr with a given key-value pair. // either returns a desired key's value or else the whole matched object. // can support up to 2 levels deep using subkey; objects more complex can't use this approach. function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) { var arrFiltered = arr.filter(function(el){ return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch; }) || ''; if (arrFiltered && arrFiltered[0]) { return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0]; } return ''; } function init(scope, oOptions) { var sViewName = getViewName(); scope.MI = this; MiData.init(); MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) { $compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM scope.sViewName = sViewName; getStyle(sViewName); if (oOptions) { if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify); if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope); if (oOptions.fCallback) oOptions.fCallback(scope, oOptions); } scope.$apply(); }); } // bLazyLoadingDone is needed to trigger $compile because it creates a diff // also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen. // ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1 function fLazyLoad(arrsfDirectives, scope)
return { $location: $location, Data: MiData, getStyle: getStyle, getFirstMatch: getFirstMatch, init: init, State: MiState, $window: $window, Overlay: Overlay } }]) // syntactic sugar. Maybe collapse into one service if you want, but this is modular. .service('MI', ['BaseController', function(BaseController){ return BaseController; }]) // don't lazy load these because they're needed on main/initial view .directive('miSearch', ['MI', function(MI){ return { templateUrl: 'looking-for', link: function(scope, element, attrs) { var oChangeOptions = scope.$parent.oLookingForSelectionChange; if (oChangeOptions) { scope.$watch('oLookingForSelection', oChangeOptions.fFunction, oChangeOptions.bDigest); } } } }]) // TODO: this directive needs it's own scope and data .directive('miTypeaheadPrograms', function(){ return { templateUrl: 'typeahead-programs' } }) .directive('miTypeaheadInstallations', function(){ return { templateUrl: 'typeahead-installations', link: function(scope, element, attrs) { // TODO: should MDInstallations be standard under MI.Data? // TODO: should we include a reference or element to return on click as a standard here? scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () { scope.arroInstallations = scope.MDInstallations; scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay'); } } } }) .directive('miRegionAccordions', function(){ return { restrict: 'E', templateUrl: 'region-accordions', scope: { arrdirectiveregions: '=' }, link: function(scope, element, attrs) { //ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick; // animated open indicator scope.fIndicateOpen = function ($event) { var $panelHeading = $($event.currentTarget).find('.panel-heading'), $openIndicator = $panelHeading.prev(); if (!$openIndicator.length) { // create $openIndicator if it's not there $openIndicator = $('<div class="open-indicator">'); $panelHeading.before($openIndicator); } if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case $openIndicator.animate({ width: 40 }, 300); } else { // ensure indicator is collapsed in this case $openIndicator.animate({ width: 0 }, 300); } } } } }) // TODO: delete this block // define other directives for lazy loading later on // ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-lazy-loaded-in-angularjs .config(function ($compileProvider) { myapp._compileProvider = $compileProvider; }) .config(function(cssInjectorProvider){ cssInjectorProvider.setSinglePageMode(true); }) // ref: https://stackoverflow.com/questions/20836374/how-to-catch-angular-ng-include-error // ref: https://stackoverflow.com/questions/19711550/angularjs-how-to-prevent-a-request .config(function ($httpProvider) { $httpProvider.interceptors.push('templateInterceptor'); }) .factory('templateInterceptor', function($q) { return { 'request': function(config) { var canceler = $q.defer(); config.timeout = canceler.promise; if (config.url.slice(-1) === '-') { // this is an ng-include call without valid url. Don't execute call. 
canceler.resolve(); } return config; } } }) .config(function ($routeProvider) { $routeProvider .when('/', { templateUrl: 'views/main/main.html', controller: 'mainController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/milinstall/:sInstallationId', { templateUrl: 'views/milinstall/milinstall.html', controller: 'miController' }) .when('/milinstall/:sInstallationId/:sCategoryId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) // yes, use the categoriesController for the topic view // if possible, don't even reload the controller on nav from category to topic .when('/milinstall/:sInstallationId/:sCategoryId/:sTopicId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/programs', { templateUrl: 'views/main/program-search.html', controller: 'ProgramsController' }) .when('/search', { templateUrl: 'views/common/common.html', controller: 'searchController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/viewall', { templateUrl: 'views/common/common.html', controller: 'viewAllController', resolve: { MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/planmymove', { headerTemplateUrl: 'views/planmymove/planmymove-header.html', templateUrl: 'views/planmymove/planmymove.html', controller: 'planMyMoveController', resolve: { MDInstallations: function (MI) { return MI.Data.mock('installations'); } } }) .otherwise({ redirectTo: '/' }); }); myapp.factory('prgdata', function () { var savedData = {} function set(data) { savedData = data; } function get() { return savedData; } return { set: set, get: get } }); //dynamically sets the header partial for PMM and MI myapp.run(['$rootScope', function ($rootScope) { $rootScope.$on('$routeChangeSuccess', function (event, current, previous) { $rootScope.headerTemplateUrl = current.$$route.headerTemplateUrl || 'views/header/header.html'; }); }]); myapp.filter('unsafe', function($sce) { return $sce.trustAsHtml; }); //typeahead filter for first character only myapp.filter('selectFirstOnlyFilter', function () { return function (input, searchString) { return input.filter(function (item) { if (item.toLowerCase().search(searchString.toLowerCase()) === 0) { return true; } return false; }) } });
{ arrsfDirectives.forEach(function(sfDirective){ oLazyLoad[sfDirective](); }); scope.bLazyLoadingDone = true; }
identifier_body
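The fLazyLoad body completed in this record only registers the directives listed in the init options, and setting scope.bLazyLoadingDone afterwards gives the compiled template a diff to render against. A hedged sketch of a controller opting into lazy loading through MI.init() follows; the controller name appears in the route table, but the callback contents are assumptions — only the option names (constantify, lazilyLoad, fCallback) come from BaseController.init above.

```js
// Hypothetical usage sketch of MI.init() with lazy loading.
myapp.controller('miController', ['$scope', 'MI', function($scope, MI) {
    MI.init($scope, {
        // copy these by-value constants from MiData._constants onto the scope
        constantify: ['arroBranches', 'arroMilesOptions'],
        // register the miCard directive only when this view needs it
        lazilyLoad: ['miCard'],
        // runs after the view template has been fetched and compiled
        fCallback: function(scope) {
            scope.sTitle = 'Installation details'; // illustrative only
        }
    });
}]);
```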
app.js
/* * Register angular module with custom name myapp, all other Angular objects will add it to this custom angular module, * Here Other Anulag objects used are Controller, Service, RouteProvider etc. **/ // TODO: standardize module and function descriptions, maybe using jsdoc 'use strict'; var myapp = angular.module('myapp', [ 'angular.css.injector', 'mcfp.Overlay', 'ngRoute', 'ngResource', 'ngAnimate', 'ngSanitize', 'ngDropdowns', 'ui.bootstrap' ]) // Description: This service is a super cache and a data management service. // It is initialized with the data we will need to load the initial view in main.js // Other data can be dynamically loaded, and everything will be cached in the inspectable oMiCache object // It is a $cacheFactory instance, so the keys can be URLs or arbitrary strings. // // use $http.get(sDataKey) to get urls or constants and MiData.put() to add constants // angular is so backwards...factories are functional and services actually invoke Object.create() (like an OOP/Java Factory) .service('MiData', ['$cacheFactory', '$http', function($cacheFactory, $http){ var _oMiData = $cacheFactory('miData'), _context = this; this._constants = { oLookingForOptions: [ {type: 'Military installation'}, {type: 'State resources'}, {type: 'Program or service'} ], arroFilterByDefault: [ {type: 'Select Installation or zip code'}, {type: 'Zip code'}, {type: 'Installation'} ], arroFilterByOptions: [ {type: 'Zip code'}, {type: 'Installation'} ], arroMilesOptions: [ {type: '5 miles'}, {type: '10 miles'}, {type: '25 miles'}, {type: '50 miles'}, {type: '100 miles'}, {type: '250 miles'}, {type: '500 miles'} ], arroBranches: [ { "sName": "Air Force", "bActiveFilter": false }, { "sName": "Army", "bActiveFilter": false }, { "sName": "Navy", "bActiveFilter": false }, { "sName": "Marine Corps", "bActiveFilter": false }, { "sName": "Defense Logistics Agency", "bActiveFilter": false } ], arroConus: [{ sName: 'CONUS', bActiveFilter: false },{ sName: 'OCONUS', bActiveFilter: false }], arroViewBy: [ {type: 'Zip Code'}, {type: 'Installation'} ] }; $http.defaults.cache = _oMiData; // binds a series of uniquely named constants to a controller's scope this.constantify = function(scope, arrsConstants) { arrsConstants.forEach(function(sKeyName) { if (typeof _context._constants[sKeyName] === 'object') { scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName])); } else { scope[sKeyName] = _context._constants[sKeyName]; } }); } this.get = function(sDataKey) { return new Promise(function(resolve, reject){ $http.get(sDataKey).then(function(response){ resolve(response.data); }, function(reason){ resolve({ error: 'An error occurred!' 
}); }) }); } // a syntactic sugar for mock data this.mock = function(sUriSubstring) { return this.get('/data/get-' + sUriSubstring + '.json'); } this.init = function() { $http.get('/data/get-installations.json'); $http.get('/data/get-program-cards.json'); $http.get('/data/get-programs.json'); $http.get('/data/states.json'); } }]) // ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages // description: this service allows passing data between views // TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches)); .service('MiState', function(){ var _oState = {}; function set(data) { _oState = data; } function get() { return _oState; } return { set: set, get: get } }) // description: creates a base controller // TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found. // rule of thumb: If three views need it, add it here. .service('BaseController', ['$location', 'cssInjector', 'MiData', 'MiState', '$templateCache', '$compile', '$timeout', '$window', 'Overlay', function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) { var oContext = this, oLazyLoad = { miCard: function() { myapp._compileProvider.directive('miCard', function() { return { templateUrl: '/views/miCard/miCard.html' } }) } }; function getViewName() { var sResolvedViewName = MiData.sResolvedViewName; if (sResolvedViewName) { MiData.sResolvedViewName = ''; return sResolvedViewName; } return $location.$$path.slice(1).split('/')[0] || 'main'; } function getStyle(sViewName) { cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css'); } // description: a utility method to quickly get a needed item from a list. // finds the first member of arr with a given key-value pair. // either returns a desired key's value or else the whole matched object. // can support up to 2 levels deep using subkey; objects more complex can't use this approach. function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) { var arrFiltered = arr.filter(function(el){ return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch; }) || ''; if (arrFiltered && arrFiltered[0]) { return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0]; } return ''; } function init(scope, oOptions) { var sViewName = getViewName(); scope.MI = this; MiData.init(); MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) { $compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM scope.sViewName = sViewName; getStyle(sViewName); if (oOptions) { if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify); if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope); if (oOptions.fCallback) oOptions.fCallback(scope, oOptions); } scope.$apply(); }); } // bLazyLoadingDone is needed to trigger $compile because it creates a diff // also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen. // ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1 function
(arrsfDirectives, scope) { arrsfDirectives.forEach(function(sfDirective){ oLazyLoad[sfDirective](); }); scope.bLazyLoadingDone = true; } return { $location: $location, Data: MiData, getStyle: getStyle, getFirstMatch: getFirstMatch, init: init, State: MiState, $window: $window, Overlay: Overlay } }]) // syntactic sugar. Maybe collapse into one service if you want, but this is modular. .service('MI', ['BaseController', function(BaseController){ return BaseController; }]) // don't lazy load these because they're needed on main/initial view .directive('miSearch', ['MI', function(MI){ return { templateUrl: 'looking-for', link: function(scope, element, attrs) { var oChangeOptions = scope.$parent.oLookingForSelectionChange; if (oChangeOptions) { scope.$watch('oLookingForSelection', oChangeOptions.fFunction, oChangeOptions.bDigest); } } } }]) // TODO: this directive needs it's own scope and data .directive('miTypeaheadPrograms', function(){ return { templateUrl: 'typeahead-programs' } }) .directive('miTypeaheadInstallations', function(){ return { templateUrl: 'typeahead-installations', link: function(scope, element, attrs) { // TODO: should MDInstallations be standard under MI.Data? // TODO: should we include a reference or element to return on click as a standard here? scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () { scope.arroInstallations = scope.MDInstallations; scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay'); } } } }) .directive('miRegionAccordions', function(){ return { restrict: 'E', templateUrl: 'region-accordions', scope: { arrdirectiveregions: '=' }, link: function(scope, element, attrs) { //ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick; // animated open indicator scope.fIndicateOpen = function ($event) { var $panelHeading = $($event.currentTarget).find('.panel-heading'), $openIndicator = $panelHeading.prev(); if (!$openIndicator.length) { // create $openIndicator if it's not there $openIndicator = $('<div class="open-indicator">'); $panelHeading.before($openIndicator); } if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case $openIndicator.animate({ width: 40 }, 300); } else { // ensure indicator is collapsed in this case $openIndicator.animate({ width: 0 }, 300); } } } } }) // TODO: delete this block // define other directives for lazy loading later on // ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-lazy-loaded-in-angularjs .config(function ($compileProvider) { myapp._compileProvider = $compileProvider; }) .config(function(cssInjectorProvider){ cssInjectorProvider.setSinglePageMode(true); }) // ref: https://stackoverflow.com/questions/20836374/how-to-catch-angular-ng-include-error // ref: https://stackoverflow.com/questions/19711550/angularjs-how-to-prevent-a-request .config(function ($httpProvider) { $httpProvider.interceptors.push('templateInterceptor'); }) .factory('templateInterceptor', function($q) { return { 'request': function(config) { var canceler = $q.defer(); config.timeout = canceler.promise; if (config.url.slice(-1) === '-') { // this is an ng-include call without valid url. Don't execute call. 
canceler.resolve(); } return config; } } }) .config(function ($routeProvider) { $routeProvider .when('/', { templateUrl: 'views/main/main.html', controller: 'mainController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/milinstall/:sInstallationId', { templateUrl: 'views/milinstall/milinstall.html', controller: 'miController' }) .when('/milinstall/:sInstallationId/:sCategoryId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) // yes, use the categoriesController for the topic view // if possible, don't even reload the controller on nav from category to topic .when('/milinstall/:sInstallationId/:sCategoryId/:sTopicId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/programs', { templateUrl: 'views/main/program-search.html', controller: 'ProgramsController' }) .when('/search', { templateUrl: 'views/common/common.html', controller: 'searchController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/viewall', { templateUrl: 'views/common/common.html', controller: 'viewAllController', resolve: { MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/planmymove', { headerTemplateUrl: 'views/planmymove/planmymove-header.html', templateUrl: 'views/planmymove/planmymove.html', controller: 'planMyMoveController', resolve: { MDInstallations: function (MI) { return MI.Data.mock('installations'); } } }) .otherwise({ redirectTo: '/' }); }); myapp.factory('prgdata', function () { var savedData = {} function set(data) { savedData = data; } function get() { return savedData; } return { set: set, get: get } }); //dynamically sets the header partial for PMM and MI myapp.run(['$rootScope', function ($rootScope) { $rootScope.$on('$routeChangeSuccess', function (event, current, previous) { $rootScope.headerTemplateUrl = current.$$route.headerTemplateUrl || 'views/header/header.html'; }); }]); myapp.filter('unsafe', function($sce) { return $sce.trustAsHtml; }); //typeahead filter for first character only myapp.filter('selectFirstOnlyFilter', function () { return function (input, searchString) { return input.filter(function (item) { if (item.toLowerCase().search(searchString.toLowerCase()) === 0) { return true; } return false; }) } });
fLazyLoad
identifier_name
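getFirstMatch, defined in the BaseController above, is a small lookup helper: it filters arr for the first element whose sKey (or sKey.sSubKey) equals sValToMatch and returns either the whole matched object or just its sValToReturn property, falling back to an empty string. A short usage sketch under made-up data follows; the arroInstallations records are assumptions used only to illustrate the argument order.

```js
// Usage sketch for MI.getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey).
var arroInstallations = [
    { sName: 'Fort Alpha', sId: '123', oAddress: { sState: 'TX' } },
    { sName: 'Fort Bravo', sId: '456', oAddress: { sState: 'VA' } }
];

// Whole object whose sId === '456'  ->  the Fort Bravo record
var oMatch = MI.getFirstMatch(arroInstallations, 'sId', '456');

// Just the sName of the first installation whose oAddress.sState === 'TX'  ->  'Fort Alpha'
var sName = MI.getFirstMatch(arroInstallations, 'oAddress', 'TX', 'sName', 'sState');

// No match  ->  '' (empty string), so callers can do a simple truthiness check
var sMissing = MI.getFirstMatch(arroInstallations, 'sId', '999', 'sName');
```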
app.js
/* * Register angular module with custom name myapp, all other Angular objects will add it to this custom angular module, * Here Other Anulag objects used are Controller, Service, RouteProvider etc. **/ // TODO: standardize module and function descriptions, maybe using jsdoc 'use strict'; var myapp = angular.module('myapp', [ 'angular.css.injector', 'mcfp.Overlay', 'ngRoute', 'ngResource', 'ngAnimate', 'ngSanitize', 'ngDropdowns', 'ui.bootstrap' ]) // Description: This service is a super cache and a data management service. // It is initialized with the data we will need to load the initial view in main.js // Other data can be dynamically loaded, and everything will be cached in the inspectable oMiCache object // It is a $cacheFactory instance, so the keys can be URLs or arbitrary strings. // // use $http.get(sDataKey) to get urls or constants and MiData.put() to add constants // angular is so backwards...factories are functional and services actually invoke Object.create() (like an OOP/Java Factory) .service('MiData', ['$cacheFactory', '$http', function($cacheFactory, $http){ var _oMiData = $cacheFactory('miData'), _context = this; this._constants = { oLookingForOptions: [ {type: 'Military installation'}, {type: 'State resources'}, {type: 'Program or service'} ], arroFilterByDefault: [ {type: 'Select Installation or zip code'}, {type: 'Zip code'}, {type: 'Installation'} ], arroFilterByOptions: [ {type: 'Zip code'}, {type: 'Installation'} ], arroMilesOptions: [ {type: '5 miles'}, {type: '10 miles'}, {type: '25 miles'}, {type: '50 miles'}, {type: '100 miles'}, {type: '250 miles'}, {type: '500 miles'} ], arroBranches: [ { "sName": "Air Force", "bActiveFilter": false }, { "sName": "Army", "bActiveFilter": false }, { "sName": "Navy", "bActiveFilter": false }, { "sName": "Marine Corps", "bActiveFilter": false }, { "sName": "Defense Logistics Agency", "bActiveFilter": false } ], arroConus: [{ sName: 'CONUS', bActiveFilter: false },{ sName: 'OCONUS', bActiveFilter: false }], arroViewBy: [ {type: 'Zip Code'}, {type: 'Installation'} ] }; $http.defaults.cache = _oMiData; // binds a series of uniquely named constants to a controller's scope this.constantify = function(scope, arrsConstants) { arrsConstants.forEach(function(sKeyName) { if (typeof _context._constants[sKeyName] === 'object') { scope[sKeyName] = JSON.parse(JSON.stringify(_context._constants[sKeyName])); } else { scope[sKeyName] = _context._constants[sKeyName]; } }); } this.get = function(sDataKey) { return new Promise(function(resolve, reject){ $http.get(sDataKey).then(function(response){ resolve(response.data); }, function(reason){ resolve({ error: 'An error occurred!' 
}); }) }); } // a syntactic sugar for mock data this.mock = function(sUriSubstring) { return this.get('/data/get-' + sUriSubstring + '.json'); } this.init = function() { $http.get('/data/get-installations.json'); $http.get('/data/get-program-cards.json'); $http.get('/data/get-programs.json'); $http.get('/data/states.json'); } }]) // ref: https://stackoverflow.com/questions/22408790/angularjs-passing-data-between-pages // description: this service allows passing data between views // TODO: maybe all gets should be byval, eg $scope.arroBranches = JSON.parse(JSON.stringify(MI.Data._constants.arroBranches)); .service('MiState', function(){ var _oState = {}; function set(data) { _oState = data; } function get() { return _oState; } return { set: set, get: get } }) // description: creates a base controller // TODO: a single, generic MI.get() which internally looks at State, Data, $templateCache, $http, then handle error or not found. // rule of thumb: If three views need it, add it here. .service('BaseController', ['$location', 'cssInjector', 'MiData', 'MiState', '$templateCache', '$compile', '$timeout', '$window', 'Overlay', function($location, cssInjector, MiData, MiState, $templateCache, $compile, $timeout, $window, Overlay) { var oContext = this, oLazyLoad = { miCard: function() { myapp._compileProvider.directive('miCard', function() { return { templateUrl: '/views/miCard/miCard.html' } }) } }; function getViewName() { var sResolvedViewName = MiData.sResolvedViewName; if (sResolvedViewName) { MiData.sResolvedViewName = ''; return sResolvedViewName; } return $location.$$path.slice(1).split('/')[0] || 'main'; } function getStyle(sViewName) { cssInjector.add('/views/' + sViewName + '/' + sViewName + '.css'); } // description: a utility method to quickly get a needed item from a list. // finds the first member of arr with a given key-value pair. // either returns a desired key's value or else the whole matched object. // can support up to 2 levels deep using subkey; objects more complex can't use this approach. function getFirstMatch(arr, sKey, sValToMatch, sValToReturn, sSubKey) { var arrFiltered = arr.filter(function(el){ return sSubKey ? el[sKey][sSubKey] === sValToMatch : el[sKey] === sValToMatch; }) || ''; if (arrFiltered && arrFiltered[0]) { return sValToReturn ? arrFiltered[0][sValToReturn] : arrFiltered[0]; } return ''; } function init(scope, oOptions) { var sViewName = getViewName(); scope.MI = this; MiData.init(); MiData.get('/views/' + sViewName + '/' + sViewName + '.html').then(function(sHtml) { $compile($(sHtml))(scope); // virtually add dynamic html to controller scope, but do not actually render to browser DOM scope.sViewName = sViewName; getStyle(sViewName); if (oOptions) { if (oOptions.constantify) MiData.constantify(scope, oOptions.constantify); if (oOptions.lazilyLoad) fLazyLoad(oOptions.lazilyLoad, scope); if (oOptions.fCallback) oOptions.fCallback(scope, oOptions); } scope.$apply(); }); } // bLazyLoadingDone is needed to trigger $compile because it creates a diff // also, make sure your lazily loaded element has some content. Or again, there will be no diff and no render will happen. 
// ref: https://stackoverflow.com/questions/38514918/when-lazy-loading-directive-its-never-run?noredirect=1&lq=1 function fLazyLoad(arrsfDirectives, scope) { arrsfDirectives.forEach(function(sfDirective){ oLazyLoad[sfDirective](); }); scope.bLazyLoadingDone = true; } return { $location: $location, Data: MiData, getStyle: getStyle, getFirstMatch: getFirstMatch, init: init, State: MiState, $window: $window, Overlay: Overlay } }]) // syntactic sugar. Maybe collapse into one service if you want, but this is modular. .service('MI', ['BaseController', function(BaseController){ return BaseController; }]) // don't lazy load these because they're needed on main/initial view .directive('miSearch', ['MI', function(MI){ return { templateUrl: 'looking-for', link: function(scope, element, attrs) { var oChangeOptions = scope.$parent.oLookingForSelectionChange; if (oChangeOptions) { scope.$watch('oLookingForSelection', oChangeOptions.fFunction, oChangeOptions.bDigest); } } } }]) // TODO: this directive needs it's own scope and data .directive('miTypeaheadPrograms', function(){ return { templateUrl: 'typeahead-programs' } }) .directive('miTypeaheadInstallations', function(){ return { templateUrl: 'typeahead-installations', link: function(scope, element, attrs) { // TODO: should MDInstallations be standard under MI.Data? // TODO: should we include a reference or element to return on click as a standard here? scope.fShowInstallationsOverlay = scope.$parent.fHandleInstallationTextClick || function () { scope.arroInstallations = scope.MDInstallations; scope.$overlay = scope.MI.Overlay.showByTemplateName(scope, 'installations-overlay'); } } } }) .directive('miRegionAccordions', function(){ return { restrict: 'E', templateUrl: 'region-accordions', scope: { arrdirectiveregions: '=' }, link: function(scope, element, attrs) { //ref: https://stackoverflow.com/questions/17900201/how-to-access-parent-scope-from-within-a-custom-directive-with-own-scope-in-an scope.fHandleInstallationTextClick = scope.$parent.fHandleInstallationTextClick; // animated open indicator scope.fIndicateOpen = function ($event) { var $panelHeading = $($event.currentTarget).find('.panel-heading'), $openIndicator = $panelHeading.prev(); if (!$openIndicator.length)
if ($event.currentTarget.classList.contains('panel-open')) { // ensure indicator is expanded in this case $openIndicator.animate({ width: 40 }, 300); } else { // ensure indicator is collapsed in this case $openIndicator.animate({ width: 0 }, 300); } } } } }) // TODO: delete this block // define other directives for lazy loading later on // ref: https://stackoverflow.com/questions/12538665/how-can-directives-be-lazy-loaded-in-angularjs .config(function ($compileProvider) { myapp._compileProvider = $compileProvider; }) .config(function(cssInjectorProvider){ cssInjectorProvider.setSinglePageMode(true); }) // ref: https://stackoverflow.com/questions/20836374/how-to-catch-angular-ng-include-error // ref: https://stackoverflow.com/questions/19711550/angularjs-how-to-prevent-a-request .config(function ($httpProvider) { $httpProvider.interceptors.push('templateInterceptor'); }) .factory('templateInterceptor', function($q) { return { 'request': function(config) { var canceler = $q.defer(); config.timeout = canceler.promise; if (config.url.slice(-1) === '-') { // this is an ng-include call without valid url. Don't execute call. canceler.resolve(); } return config; } } }) .config(function ($routeProvider) { $routeProvider .when('/', { templateUrl: 'views/main/main.html', controller: 'mainController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/milinstall/:sInstallationId', { templateUrl: 'views/milinstall/milinstall.html', controller: 'miController' }) .when('/milinstall/:sInstallationId/:sCategoryId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) // yes, use the categoriesController for the topic view // if possible, don't even reload the controller on nav from category to topic .when('/milinstall/:sInstallationId/:sCategoryId/:sTopicId', { templateUrl: 'views/common/common.html', controller: 'categoriesController', resolve: { dependencies: function(MI) { MI.Data.sResolvedViewName = 'categories'; return; }, MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/programs', { templateUrl: 'views/main/program-search.html', controller: 'ProgramsController' }) .when('/search', { templateUrl: 'views/common/common.html', controller: 'searchController', resolve: { MDPrograms: function(MI) { return MI.Data.mock('programs'); }, MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/viewall', { templateUrl: 'views/common/common.html', controller: 'viewAllController', resolve: { MDInstallations: function(MI) { return MI.Data.mock('installations'); } } }) .when('/planmymove', { headerTemplateUrl: 'views/planmymove/planmymove-header.html', templateUrl: 'views/planmymove/planmymove.html', controller: 'planMyMoveController', resolve: { MDInstallations: function (MI) { return MI.Data.mock('installations'); } } }) .otherwise({ redirectTo: '/' }); }); myapp.factory('prgdata', function () { var savedData = {} function set(data) { savedData = data; } function get() { return savedData; } return { set: set, get: get } }); //dynamically sets the header partial for PMM and MI myapp.run(['$rootScope', function ($rootScope) { 
$rootScope.$on('$routeChangeSuccess', function (event, current, previous) { $rootScope.headerTemplateUrl = current.$$route.headerTemplateUrl || 'views/header/header.html'; }); }]); myapp.filter('unsafe', function($sce) { return $sce.trustAsHtml; }); //typeahead filter for first character only myapp.filter('selectFirstOnlyFilter', function () { return function (input, searchString) { return input.filter(function (item) { if (item.toLowerCase().search(searchString.toLowerCase()) === 0) { return true; } return false; }) } });
{ // create $openIndicator if it's not there $openIndicator = $('<div class="open-indicator">'); $panelHeading.before($openIndicator); }
conditional_block
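Two behaviours in this record are easy to miss: templateInterceptor silently cancels any $http request whose URL ends in '-' (the signature of an ng-include whose expression has not resolved yet), and MiData.get never rejects, resolving with an { error: ... } object on failure. A small consumer sketch under those behaviours follows; '/data/states.json' is one of the URLs preloaded by MiData.init(), everything else is illustrative.

```js
// Sketch of reading a cached data file through MI.Data.get() inside a controller.
// Assumes $scope and MI are injected; the handling code is an illustration only.
MI.Data.get('/data/states.json').then(function(oResult) {
    if (oResult.error) {
        // $http failed, but MiData still resolved, with { error: 'An error occurred!' }
        console.warn(oResult.error);
        return;
    }
    $scope.arroStates = oResult;
    $scope.$apply(); // MiData.get uses a native Promise, so this runs outside Angular's digest
});
```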
test.rs
use super::{cmd, Expression}; use std; use std::collections::HashMap; use std::env; use std::env::consts::EXE_EXTENSION; use std::ffi::OsString; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::Command; use std::str; use std::sync::{Arc, Once}; // Include a copy of the sh function, because we have a lot of old tests that // use it, and it's a lot easier than managing a circular dependency between // duct and duct_sh. pub fn sh(command: &'static str) -> Expression { let argv = shell_command_argv(command.into()); cmd(&argv[0], &argv[1..]) } #[cfg(unix)] fn shell_command_argv(command: OsString) -> Vec<OsString> { vec!["/bin/sh".into(), "-c".into(), command] } #[cfg(windows)] fn shell_command_argv(command: OsString) -> Vec<OsString> { let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into()); vec![comspec, "/C".into(), command] } pub fn path_to_exe(name: &str) -> PathBuf { // This project defines some associated binaries for testing, and we shell out to them in // these tests. `cargo test` doesn't automatically build associated binaries, so this // function takes care of building them explicitly. static CARGO_BUILD_ONCE: Once = Once::new(); CARGO_BUILD_ONCE.call_once(|| { let build_status = Command::new("cargo") .arg("build") .arg("--quiet") .status() .unwrap(); assert!( build_status.success(), "Cargo failed to build associated binaries." ); }); Path::new("target") .join("debug") .join(name) .with_extension(EXE_EXTENSION) } pub fn true_cmd() -> Expression { cmd!(path_to_exe("status"), "0") } fn false_cmd() -> Expression { cmd!(path_to_exe("status"), "1") } #[test] fn test_cmd() { let output = cmd!(path_to_exe("echo"), "hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn
() { // Windows compatible. let output = sh("echo hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_start() { let handle1 = cmd!(path_to_exe("echo"), "hi") .stdout_capture() .start() .unwrap(); let handle2 = cmd!(path_to_exe("echo"), "lo") .stdout_capture() .start() .unwrap(); let output1 = handle1.wait().unwrap(); let output2 = handle2.wait().unwrap(); assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim()); } #[test] fn test_error() { let result = false_cmd().run(); if let Err(err) = result { assert_eq!(err.kind(), io::ErrorKind::Other); } else { panic!("Expected a status error."); } } #[test] fn test_unchecked() { let unchecked_false = false_cmd().unchecked(); // Unchecked errors shouldn't cause `run` to return an error. let output = unchecked_false .pipe(cmd!(path_to_exe("echo"), "waa")) .stdout_capture() .run() .unwrap(); // The value of the exit code is preserved. assert_eq!(1, output.status.code().unwrap()); assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim()); } #[test] fn test_unchecked_in_pipe() { let zero = cmd!(path_to_exe("status"), "0"); let one = cmd!(path_to_exe("status"), "1"); let two = cmd!(path_to_exe("status"), "2"); // Right takes precedence over left. let output = one.pipe(two.clone()).unchecked().run().unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that checked on the left takes precedence over unchecked on // the right. let output = one.pipe(two.unchecked()).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); // Right takes precedence over the left again if they're both unchecked. let output = one .unchecked() .pipe(two.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that if the right is a success, the left takes precedence. let output = one .unchecked() .pipe(zero.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(1, output.status.code().unwrap()); // Even if the right is checked. let output = one.unchecked().pipe(zero).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); } #[test] fn test_pipe() { let output = sh("echo xxx") .pipe(cmd!(path_to_exe("x_to_y"))) .read() .unwrap(); assert_eq!("yyy", output); // Check that errors on either side are propagated. let result = true_cmd().pipe(false_cmd()).run(); assert!(result.is_err()); let result = false_cmd().pipe(true_cmd()).run(); assert!(result.is_err()); } #[test] fn test_pipe_with_kill() { // Make sure both sides get killed. let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Note that we don't use unchecked() here. This tests that kill suppresses // exit status errors. let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap(); handle.kill().unwrap(); // But calling wait again should be an error, because of the status. handle.wait().unwrap_err(); } #[test] fn test_pipe_start() { let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!")); let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Errors starting the left side of a pipe are returned immediately, and // the right side is never started. nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err(); // Errors starting the right side are also returned immediately, and the // the left side is killed first. sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err(); } #[test] fn test_multiple_threads() { // Wait on the sleep command in a background thread, while the main thread // kills it. 
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); let handle = Arc::new(sleep_cmd.unchecked().start().unwrap()); let arc_clone = handle.clone(); let wait_thread = std::thread::spawn(move || { arc_clone.wait().unwrap(); }); handle.kill().unwrap(); wait_thread.join().unwrap(); } #[test] fn test_nonblocking_waits() { let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Make sure pipelines handle try_wait correctly. let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap(); // Make sure try_wait doesn't block on it. assert!(handle.try_wait().unwrap().is_none()); handle.kill().unwrap(); } #[test] fn test_input() { let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx"); let output = expr.read().unwrap(); assert_eq!("yyy", output); } #[test] fn test_stderr() { let (mut reader, writer) = ::os_pipe::pipe().unwrap(); sh("echo hi>&2").stderr_file(writer).run().unwrap(); let mut s = String::new(); reader.read_to_string(&mut s).unwrap(); assert_eq!(s.trim(), "hi"); } #[test] fn test_null() { let expr = cmd!(path_to_exe("cat")) .stdin_null() .stdout_null() .stderr_null(); let output = expr.read().unwrap(); assert_eq!("", output); } #[test] fn test_path() { let dir = tempfile::tempdir().unwrap(); let input_file = dir.path().join("input_file"); let output_file = dir.path().join("output_file"); File::create(&input_file) .unwrap() .write_all(b"xxx") .unwrap(); let expr = cmd!(path_to_exe("x_to_y")) .stdin_path(&input_file) .stdout_path(&output_file); let output = expr.read().unwrap(); assert_eq!("", output); let mut file_output = String::new(); File::open(&output_file) .unwrap() .read_to_string(&mut file_output) .unwrap(); assert_eq!("yyy", file_output); } #[test] fn test_swapping() { let output = sh("echo hi") .stdout_to_stderr() .stderr_capture() .run() .unwrap(); let stderr = str::from_utf8(&output.stderr).unwrap().trim(); assert_eq!("hi", stderr); // Windows compatible. (Requires no space before the ">".) let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap(); assert_eq!("hi", output); } #[test] fn test_file() { let dir = tempfile::tempdir().unwrap(); let file = dir.path().join("file"); File::create(&file).unwrap().write_all(b"example").unwrap(); let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap()); let output = expr.read().unwrap(); assert_eq!(output, "example"); } #[test] fn test_ergonomics() { let mystr = "owned string".to_owned(); let mypathbuf = Path::new("a/b/c").to_owned(); let myvec = vec![1, 2, 3]; // These are nonsense expressions. We just want to make sure they compile. let _ = sh("true") .stdin_path(&*mystr) .stdin_bytes(&*myvec) .stdout_path(&*mypathbuf); let _ = sh("true") .stdin_path(mystr) .stdin_bytes(myvec) .stdout_path(mypathbuf); // Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input(). // TODO: Is it worth having these impls for &Vec in other cases? // let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf); } #[test] fn test_capture_both() { // Windows compatible, no space before ">", and we trim newlines at the end to avoid // dealing with the different kinds. let output = sh("echo hi && echo lo>&2") .stdout_capture() .stderr_capture() .run() .unwrap(); assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim()); } #[test] fn test_dir() { // This test checks the interaction of `dir` and relative exe paths. // Make sure that's actually what we're testing. 
let pwd_path = path_to_exe("pwd"); assert!(pwd_path.is_relative()); let pwd = cmd!(pwd_path); // First assert that ordinary commands happen in the parent's dir. let pwd_output = pwd.read().unwrap(); let pwd_path = Path::new(&pwd_output); assert_eq!(pwd_path, env::current_dir().unwrap()); // Now create a temp dir and make sure we can set dir to it. This // also tests the interaction of `dir` and relative exe paths. let dir = tempfile::tempdir().unwrap(); let pwd_output = pwd.dir(dir.path()).read().unwrap(); let pwd_path = Path::new(&pwd_output); // pwd_path isn't totally canonical on Windows, because it // doesn't have a prefix. Thus we have to canonicalize both // sides. (This also handles symlinks in TMP_DIR.) assert_eq!( pwd_path.canonicalize().unwrap(), dir.path().canonicalize().unwrap() ); } #[test] fn test_env() { let output = cmd!(path_to_exe("print_env"), "foo") .env("foo", "bar") .read() .unwrap(); assert_eq!("bar", output); } #[test] fn test_full_env() { // Note that it's important that no other tests use this variable name, // because the test runner is multithreaded. let var_name = "TEST_FULL_ENV"; // Capture the parent env, and make sure it does *not* contain our variable. let clean_env: HashMap<String, String> = env::vars().collect(); assert!( !clean_env.contains_key(var_name), "why is this variable set?" ); // Run a child process with that map passed to full_env(). It should be guaranteed not to // see our variable, regardless of any outer env() calls or changes in the parent. let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env); // Dirty the parent env. Should be suppressed. env::set_var(var_name, "junk1"); // And make an outer env() call. Should also be suppressed. let dirty_child = clean_child.env(var_name, "junk2"); // Check that neither of those have any effect. let output = dirty_child.read().unwrap(); assert_eq!("", output); } #[test] fn test_env_remove() { // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE"; env::set_var(var_name, "junk2"); // Run a command that observes the variable. let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); assert_eq!("junk2", output1); // Run the same command with that variable removed. let output2 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name) .read() .unwrap(); assert_eq!("", output2); } #[test] fn test_env_remove_case_sensitivity() { // Env var deletion is particularly sensitive to the differences in // case-sensitivity between Unix and Windows. The semantics of env_remove // in duct must *match the platform*. // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY"; env::set_var(var_name, "abc123"); // Run a command that tries to clear the same variable, but in lowercase. let output1 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name.to_lowercase()) .read() .unwrap(); // Now try to clear that variable from the parent environment, again using // lowercase, and run the same command without `env_remove`. env::remove_var(var_name.to_lowercase()); let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); // On Unix, env vars are case sensitive, and we don't expect either removal // to have any effect. 
On Windows, they're insensitive, and we expect both // removals to work. The key thing is that both approaches to removal have // the *same effect*. assert_eq!(output1, output2, "failed to match platform behavior!!!"); // Go ahead and assert the exact expected output, just in case. If these // assertions ever break, it might be this test's fault and not the code's. if cfg!(windows) { assert_eq!(output1, ""); } else { assert_eq!(output1, "abc123"); } } #[test] fn test_broken_pipe() { // If the input writing thread fills up its pipe buffer, writing will block. If the process // on the other end of the pipe exits while writer is waiting, the write will return an // error. We need to swallow that error, rather than returning it. let myvec = vec![0; 1_000_000]; true_cmd().stdin_bytes(myvec).run().unwrap(); } #[test] fn test_silly() { // A silly test, purely for coverage. crate::IoValue::Null.try_clone().unwrap(); } #[test] fn test_path_sanitization() { // We don't do any chdir'ing in this process, because the tests runner is multithreaded, // and we don't want to screw up anyone else's relative paths. Instead, we shell out to a // small test process that does that for us. cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0") .run() .unwrap(); } #[test] fn test_before_spawn_hook() { let (reader, mut writer) = os_pipe::pipe().unwrap(); let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| { let reader_clone = reader.try_clone()?; cmd.stdin(reader_clone); Ok(()) }); writer.write_all(b"foobar").unwrap(); drop(writer); let output = expr.read().unwrap(); assert_eq!("foobar", output); } #[test] fn test_trailing_comma() { let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap(); assert_eq!("trailing", output); } #[test] fn test_no_argument() { let output = cmd!(path_to_exe("echo")).read().unwrap(); assert_eq!("", output); } #[test] fn test_dropping_reader() { // Use an explicit stderr pipe to test the ReaderHandle's drop behavior. let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap(); let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000") .stdout_file(stderr_writer) .reader() .unwrap(); // A zero-length read doesn't block. let n = reader_handle.read(&mut []).unwrap(); assert_eq!(n, 0); // Try-wait returns None. let output = reader_handle.try_wait().unwrap(); assert!(output.is_none()); // Now we drop the reader. This kills the child. drop(reader_handle); // Now that the child is killed, reading the stderr pipe will not block. // (Note that our copy was closed when the temporary Expression above // dropped.) let mut stderr = Vec::new(); let n = stderr_reader.read_to_end(&mut stderr).unwrap(); assert_eq!(n, 0); } #[test] fn test_kill_with_grandchild() -> io::Result<()> { // We're going to start a child process, and that child is going to start a // grandchild. The grandchild is going to sleep forever (1 day). We'll read // some output from the child to make sure it's done starting the // grandchild, and then we'll kill the child. Now, the grandchild will not // be killed, and it will still hold a write handle to the stdout pipe. So // this tests that the wait done by kill only waits on the child to exit, // and does not wait on IO to finish. // // This test leaks the grandchild process. I'm sorry. // Capturing stderr means an IO thread is spawned, even though we're using // a ReaderHandle to read stdout. What we're testing here is that kill() // doesn't wait on that IO thread. 
let mut reader = cmd!(path_to_exe("child_grandchild")) .stderr_capture() .reader()?; // Read "started" from the child to make sure we don't kill it before it // starts the grandchild. let mut started_read = [0; 7]; reader.read_exact(&mut started_read)?; assert_eq!(&started_read, b"started"); // Ok, this had better not block! reader.kill() } #[test] fn test_debug_format() { let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong")); assert_eq!( format!("{:?}", e), r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#, ); } #[test] fn test_reader_try_wait() -> io::Result<()> { // Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo // back to us, so that it will block on its stdout pipe until we start // reading. let bytes = vec![42; 1_000_000]; let mut cat_reader = cmd!(path_to_exe("cat")) .stdin_bytes(bytes.clone()) .reader()?; assert!(cat_reader.try_wait()?.is_none()); let mut output = Vec::new(); cat_reader.read_to_end(&mut output)?; assert_eq!(output, bytes); let output = cat_reader.try_wait()?.expect("is some"); assert!(output.status.success()); assert!(output.stdout.is_empty()); assert!(output.stderr.is_empty()); Ok(()) } #[test] fn test_pids() -> io::Result<()> { let handle = true_cmd().start()?; let pids = handle.pids(); assert_eq!(pids.len(), 1); handle.wait()?; let reader = true_cmd().reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 1); std::io::copy(&mut &reader, &mut std::io::sink())?; let handle = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .start()?; let pids = handle.pids(); assert_eq!(pids.len(), 3); handle.wait()?; let reader = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 3); std::io::copy(&mut &reader, &mut std::io::sink())?; Ok(()) }
test_sh
identifier_name
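The row ending here is an identifier_name sample: the masked span is nothing more than the function name test_sh, with the rest of test.rs split around it. As a rough illustration of how any of these samples goes back together (the field names prefix/middle/suffix are an assumption about the layout, not something stated in the row itself), reassembly is plain concatenation:

// Hypothetical helper: rebuilds a fill-in-the-middle sample from its parts.
// For the identifier_name row above, `middle` would be just "test_sh".
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut source = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    source.push_str(prefix);
    source.push_str(middle);
    source.push_str(suffix);
    source
}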
test.rs
use super::{cmd, Expression}; use std; use std::collections::HashMap; use std::env; use std::env::consts::EXE_EXTENSION; use std::ffi::OsString; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::Command; use std::str; use std::sync::{Arc, Once}; // Include a copy of the sh function, because we have a lot of old tests that // use it, and it's a lot easier than managing a circular dependency between // duct and duct_sh. pub fn sh(command: &'static str) -> Expression { let argv = shell_command_argv(command.into()); cmd(&argv[0], &argv[1..]) } #[cfg(unix)] fn shell_command_argv(command: OsString) -> Vec<OsString> { vec!["/bin/sh".into(), "-c".into(), command] } #[cfg(windows)] fn shell_command_argv(command: OsString) -> Vec<OsString> { let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into()); vec![comspec, "/C".into(), command] } pub fn path_to_exe(name: &str) -> PathBuf { // This project defines some associated binaries for testing, and we shell out to them in // these tests. `cargo test` doesn't automatically build associated binaries, so this // function takes care of building them explicitly. static CARGO_BUILD_ONCE: Once = Once::new(); CARGO_BUILD_ONCE.call_once(|| { let build_status = Command::new("cargo") .arg("build") .arg("--quiet") .status() .unwrap(); assert!( build_status.success(), "Cargo failed to build associated binaries." ); }); Path::new("target") .join("debug") .join(name) .with_extension(EXE_EXTENSION) } pub fn true_cmd() -> Expression { cmd!(path_to_exe("status"), "0") } fn false_cmd() -> Expression { cmd!(path_to_exe("status"), "1") } #[test] fn test_cmd() { let output = cmd!(path_to_exe("echo"), "hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_sh() { // Windows compatible. let output = sh("echo hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_start() { let handle1 = cmd!(path_to_exe("echo"), "hi") .stdout_capture() .start() .unwrap(); let handle2 = cmd!(path_to_exe("echo"), "lo") .stdout_capture() .start() .unwrap(); let output1 = handle1.wait().unwrap(); let output2 = handle2.wait().unwrap(); assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim()); } #[test] fn test_error() { let result = false_cmd().run(); if let Err(err) = result { assert_eq!(err.kind(), io::ErrorKind::Other); } else { panic!("Expected a status error."); } } #[test] fn test_unchecked() { let unchecked_false = false_cmd().unchecked(); // Unchecked errors shouldn't cause `run` to return an error. let output = unchecked_false .pipe(cmd!(path_to_exe("echo"), "waa")) .stdout_capture() .run() .unwrap(); // The value of the exit code is preserved. assert_eq!(1, output.status.code().unwrap()); assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim()); } #[test] fn test_unchecked_in_pipe() { let zero = cmd!(path_to_exe("status"), "0"); let one = cmd!(path_to_exe("status"), "1"); let two = cmd!(path_to_exe("status"), "2"); // Right takes precedence over left. let output = one.pipe(two.clone()).unchecked().run().unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that checked on the left takes precedence over unchecked on // the right. let output = one.pipe(two.unchecked()).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); // Right takes precedence over the left again if they're both unchecked. 
let output = one .unchecked() .pipe(two.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that if the right is a success, the left takes precedence. let output = one .unchecked() .pipe(zero.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(1, output.status.code().unwrap()); // Even if the right is checked. let output = one.unchecked().pipe(zero).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); } #[test] fn test_pipe() { let output = sh("echo xxx") .pipe(cmd!(path_to_exe("x_to_y"))) .read() .unwrap(); assert_eq!("yyy", output); // Check that errors on either side are propagated. let result = true_cmd().pipe(false_cmd()).run(); assert!(result.is_err()); let result = false_cmd().pipe(true_cmd()).run(); assert!(result.is_err()); } #[test] fn test_pipe_with_kill() { // Make sure both sides get killed. let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Note that we don't use unchecked() here. This tests that kill suppresses // exit status errors. let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap(); handle.kill().unwrap(); // But calling wait again should be an error, because of the status. handle.wait().unwrap_err(); } #[test] fn test_pipe_start() { let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!")); let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Errors starting the left side of a pipe are returned immediately, and // the right side is never started. nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err(); // Errors starting the right side are also returned immediately, and the // the left side is killed first. sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err(); } #[test] fn test_multiple_threads() { // Wait on the sleep command in a background thread, while the main thread // kills it. let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); let handle = Arc::new(sleep_cmd.unchecked().start().unwrap()); let arc_clone = handle.clone(); let wait_thread = std::thread::spawn(move || { arc_clone.wait().unwrap(); }); handle.kill().unwrap(); wait_thread.join().unwrap(); } #[test] fn test_nonblocking_waits() { let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Make sure pipelines handle try_wait correctly. let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap(); // Make sure try_wait doesn't block on it. 
assert!(handle.try_wait().unwrap().is_none()); handle.kill().unwrap(); } #[test] fn test_input() { let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx"); let output = expr.read().unwrap(); assert_eq!("yyy", output); } #[test] fn test_stderr() { let (mut reader, writer) = ::os_pipe::pipe().unwrap(); sh("echo hi>&2").stderr_file(writer).run().unwrap(); let mut s = String::new(); reader.read_to_string(&mut s).unwrap(); assert_eq!(s.trim(), "hi"); } #[test] fn test_null() { let expr = cmd!(path_to_exe("cat")) .stdin_null() .stdout_null() .stderr_null(); let output = expr.read().unwrap(); assert_eq!("", output); } #[test] fn test_path() { let dir = tempfile::tempdir().unwrap(); let input_file = dir.path().join("input_file"); let output_file = dir.path().join("output_file"); File::create(&input_file) .unwrap() .write_all(b"xxx") .unwrap(); let expr = cmd!(path_to_exe("x_to_y")) .stdin_path(&input_file) .stdout_path(&output_file); let output = expr.read().unwrap(); assert_eq!("", output); let mut file_output = String::new(); File::open(&output_file) .unwrap() .read_to_string(&mut file_output) .unwrap(); assert_eq!("yyy", file_output); } #[test] fn test_swapping() { let output = sh("echo hi") .stdout_to_stderr() .stderr_capture() .run() .unwrap(); let stderr = str::from_utf8(&output.stderr).unwrap().trim(); assert_eq!("hi", stderr); // Windows compatible. (Requires no space before the ">".) let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap(); assert_eq!("hi", output); } #[test] fn test_file() { let dir = tempfile::tempdir().unwrap(); let file = dir.path().join("file"); File::create(&file).unwrap().write_all(b"example").unwrap(); let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap()); let output = expr.read().unwrap(); assert_eq!(output, "example"); } #[test] fn test_ergonomics() { let mystr = "owned string".to_owned(); let mypathbuf = Path::new("a/b/c").to_owned(); let myvec = vec![1, 2, 3]; // These are nonsense expressions. We just want to make sure they compile. let _ = sh("true") .stdin_path(&*mystr) .stdin_bytes(&*myvec) .stdout_path(&*mypathbuf); let _ = sh("true") .stdin_path(mystr) .stdin_bytes(myvec) .stdout_path(mypathbuf); // Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input(). // TODO: Is it worth having these impls for &Vec in other cases? // let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf); } #[test] fn test_capture_both() { // Windows compatible, no space before ">", and we trim newlines at the end to avoid // dealing with the different kinds. let output = sh("echo hi && echo lo>&2") .stdout_capture() .stderr_capture() .run() .unwrap(); assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim()); } #[test] fn test_dir() { // This test checks the interaction of `dir` and relative exe paths. // Make sure that's actually what we're testing. let pwd_path = path_to_exe("pwd"); assert!(pwd_path.is_relative()); let pwd = cmd!(pwd_path); // First assert that ordinary commands happen in the parent's dir. let pwd_output = pwd.read().unwrap(); let pwd_path = Path::new(&pwd_output); assert_eq!(pwd_path, env::current_dir().unwrap()); // Now create a temp dir and make sure we can set dir to it. This // also tests the interaction of `dir` and relative exe paths. 
let dir = tempfile::tempdir().unwrap(); let pwd_output = pwd.dir(dir.path()).read().unwrap(); let pwd_path = Path::new(&pwd_output); // pwd_path isn't totally canonical on Windows, because it // doesn't have a prefix. Thus we have to canonicalize both // sides. (This also handles symlinks in TMP_DIR.) assert_eq!( pwd_path.canonicalize().unwrap(), dir.path().canonicalize().unwrap() ); } #[test] fn test_env() { let output = cmd!(path_to_exe("print_env"), "foo") .env("foo", "bar") .read() .unwrap(); assert_eq!("bar", output); } #[test] fn test_full_env() { // Note that it's important that no other tests use this variable name, // because the test runner is multithreaded. let var_name = "TEST_FULL_ENV"; // Capture the parent env, and make sure it does *not* contain our variable. let clean_env: HashMap<String, String> = env::vars().collect(); assert!( !clean_env.contains_key(var_name), "why is this variable set?" ); // Run a child process with that map passed to full_env(). It should be guaranteed not to // see our variable, regardless of any outer env() calls or changes in the parent. let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env); // Dirty the parent env. Should be suppressed. env::set_var(var_name, "junk1"); // And make an outer env() call. Should also be suppressed. let dirty_child = clean_child.env(var_name, "junk2"); // Check that neither of those have any effect. let output = dirty_child.read().unwrap(); assert_eq!("", output); } #[test] fn test_env_remove() { // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE"; env::set_var(var_name, "junk2"); // Run a command that observes the variable. let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); assert_eq!("junk2", output1); // Run the same command with that variable removed. let output2 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name) .read() .unwrap(); assert_eq!("", output2); } #[test] fn test_env_remove_case_sensitivity() { // Env var deletion is particularly sensitive to the differences in // case-sensitivity between Unix and Windows. The semantics of env_remove // in duct must *match the platform*. // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY"; env::set_var(var_name, "abc123"); // Run a command that tries to clear the same variable, but in lowercase. let output1 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name.to_lowercase()) .read() .unwrap(); // Now try to clear that variable from the parent environment, again using // lowercase, and run the same command without `env_remove`. env::remove_var(var_name.to_lowercase()); let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); // On Unix, env vars are case sensitive, and we don't expect either removal // to have any effect. On Windows, they're insensitive, and we expect both // removals to work. The key thing is that both approaches to removal have // the *same effect*. assert_eq!(output1, output2, "failed to match platform behavior!!!"); // Go ahead and assert the exact expected output, just in case. If these // assertions ever break, it might be this test's fault and not the code's. if cfg!(windows)
else { assert_eq!(output1, "abc123"); } } #[test] fn test_broken_pipe() { // If the input writing thread fills up its pipe buffer, writing will block. If the process // on the other end of the pipe exits while writer is waiting, the write will return an // error. We need to swallow that error, rather than returning it. let myvec = vec![0; 1_000_000]; true_cmd().stdin_bytes(myvec).run().unwrap(); } #[test] fn test_silly() { // A silly test, purely for coverage. crate::IoValue::Null.try_clone().unwrap(); } #[test] fn test_path_sanitization() { // We don't do any chdir'ing in this process, because the tests runner is multithreaded, // and we don't want to screw up anyone else's relative paths. Instead, we shell out to a // small test process that does that for us. cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0") .run() .unwrap(); } #[test] fn test_before_spawn_hook() { let (reader, mut writer) = os_pipe::pipe().unwrap(); let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| { let reader_clone = reader.try_clone()?; cmd.stdin(reader_clone); Ok(()) }); writer.write_all(b"foobar").unwrap(); drop(writer); let output = expr.read().unwrap(); assert_eq!("foobar", output); } #[test] fn test_trailing_comma() { let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap(); assert_eq!("trailing", output); } #[test] fn test_no_argument() { let output = cmd!(path_to_exe("echo")).read().unwrap(); assert_eq!("", output); } #[test] fn test_dropping_reader() { // Use an explicit stderr pipe to test the ReaderHandle's drop behavior. let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap(); let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000") .stdout_file(stderr_writer) .reader() .unwrap(); // A zero-length read doesn't block. let n = reader_handle.read(&mut []).unwrap(); assert_eq!(n, 0); // Try-wait returns None. let output = reader_handle.try_wait().unwrap(); assert!(output.is_none()); // Now we drop the reader. This kills the child. drop(reader_handle); // Now that the child is killed, reading the stderr pipe will not block. // (Note that our copy was closed when the temporary Expression above // dropped.) let mut stderr = Vec::new(); let n = stderr_reader.read_to_end(&mut stderr).unwrap(); assert_eq!(n, 0); } #[test] fn test_kill_with_grandchild() -> io::Result<()> { // We're going to start a child process, and that child is going to start a // grandchild. The grandchild is going to sleep forever (1 day). We'll read // some output from the child to make sure it's done starting the // grandchild, and then we'll kill the child. Now, the grandchild will not // be killed, and it will still hold a write handle to the stdout pipe. So // this tests that the wait done by kill only waits on the child to exit, // and does not wait on IO to finish. // // This test leaks the grandchild process. I'm sorry. // Capturing stderr means an IO thread is spawned, even though we're using // a ReaderHandle to read stdout. What we're testing here is that kill() // doesn't wait on that IO thread. let mut reader = cmd!(path_to_exe("child_grandchild")) .stderr_capture() .reader()?; // Read "started" from the child to make sure we don't kill it before it // starts the grandchild. let mut started_read = [0; 7]; reader.read_exact(&mut started_read)?; assert_eq!(&started_read, b"started"); // Ok, this had better not block! 
reader.kill() } #[test] fn test_debug_format() { let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong")); assert_eq!( format!("{:?}", e), r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#, ); } #[test] fn test_reader_try_wait() -> io::Result<()> { // Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo // back to us, so that it will block on its stdout pipe until we start // reading. let bytes = vec![42; 1_000_000]; let mut cat_reader = cmd!(path_to_exe("cat")) .stdin_bytes(bytes.clone()) .reader()?; assert!(cat_reader.try_wait()?.is_none()); let mut output = Vec::new(); cat_reader.read_to_end(&mut output)?; assert_eq!(output, bytes); let output = cat_reader.try_wait()?.expect("is some"); assert!(output.status.success()); assert!(output.stdout.is_empty()); assert!(output.stderr.is_empty()); Ok(()) } #[test] fn test_pids() -> io::Result<()> { let handle = true_cmd().start()?; let pids = handle.pids(); assert_eq!(pids.len(), 1); handle.wait()?; let reader = true_cmd().reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 1); std::io::copy(&mut &reader, &mut std::io::sink())?; let handle = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .start()?; let pids = handle.pids(); assert_eq!(pids.len(), 3); handle.wait()?; let reader = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 3); std::io::copy(&mut &reader, &mut std::io::sink())?; Ok(()) }
{ assert_eq!(output1, ""); }
conditional_block
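The conditional_block sample ending here masks the body of the if cfg!(windows) branch inside test_env_remove_case_sensitivity. For context, cfg!(windows) is an ordinary boolean expression, so both branches are compiled on every platform and only the matching one runs; a minimal sketch of that shape (names are illustrative, not taken from the row):

// Both arms must type-check everywhere; the platform only picks which one runs.
fn expected_env_value(original: &str) -> &str {
    if cfg!(windows) {
        // Windows treats env var names case-insensitively, so the lowercase
        // removal in the test clears the variable and "" is expected.
        ""
    } else {
        // Unix treats them case-sensitively, so the variable survives removal.
        original
    }
}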
test.rs
use super::{cmd, Expression}; use std; use std::collections::HashMap; use std::env; use std::env::consts::EXE_EXTENSION; use std::ffi::OsString; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::Command; use std::str; use std::sync::{Arc, Once}; // Include a copy of the sh function, because we have a lot of old tests that // use it, and it's a lot easier than managing a circular dependency between // duct and duct_sh. pub fn sh(command: &'static str) -> Expression { let argv = shell_command_argv(command.into()); cmd(&argv[0], &argv[1..]) } #[cfg(unix)] fn shell_command_argv(command: OsString) -> Vec<OsString> { vec!["/bin/sh".into(), "-c".into(), command] } #[cfg(windows)] fn shell_command_argv(command: OsString) -> Vec<OsString> { let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into()); vec![comspec, "/C".into(), command] } pub fn path_to_exe(name: &str) -> PathBuf { // This project defines some associated binaries for testing, and we shell out to them in // these tests. `cargo test` doesn't automatically build associated binaries, so this // function takes care of building them explicitly. static CARGO_BUILD_ONCE: Once = Once::new(); CARGO_BUILD_ONCE.call_once(|| { let build_status = Command::new("cargo") .arg("build") .arg("--quiet") .status() .unwrap(); assert!( build_status.success(), "Cargo failed to build associated binaries." ); }); Path::new("target") .join("debug") .join(name) .with_extension(EXE_EXTENSION) } pub fn true_cmd() -> Expression { cmd!(path_to_exe("status"), "0") } fn false_cmd() -> Expression { cmd!(path_to_exe("status"), "1") } #[test] fn test_cmd() { let output = cmd!(path_to_exe("echo"), "hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_sh() { // Windows compatible. let output = sh("echo hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_start() { let handle1 = cmd!(path_to_exe("echo"), "hi")
let handle2 = cmd!(path_to_exe("echo"), "lo") .stdout_capture() .start() .unwrap(); let output1 = handle1.wait().unwrap(); let output2 = handle2.wait().unwrap(); assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim()); } #[test] fn test_error() { let result = false_cmd().run(); if let Err(err) = result { assert_eq!(err.kind(), io::ErrorKind::Other); } else { panic!("Expected a status error."); } } #[test] fn test_unchecked() { let unchecked_false = false_cmd().unchecked(); // Unchecked errors shouldn't cause `run` to return an error. let output = unchecked_false .pipe(cmd!(path_to_exe("echo"), "waa")) .stdout_capture() .run() .unwrap(); // The value of the exit code is preserved. assert_eq!(1, output.status.code().unwrap()); assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim()); } #[test] fn test_unchecked_in_pipe() { let zero = cmd!(path_to_exe("status"), "0"); let one = cmd!(path_to_exe("status"), "1"); let two = cmd!(path_to_exe("status"), "2"); // Right takes precedence over left. let output = one.pipe(two.clone()).unchecked().run().unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that checked on the left takes precedence over unchecked on // the right. let output = one.pipe(two.unchecked()).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); // Right takes precedence over the left again if they're both unchecked. let output = one .unchecked() .pipe(two.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that if the right is a success, the left takes precedence. let output = one .unchecked() .pipe(zero.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(1, output.status.code().unwrap()); // Even if the right is checked. let output = one.unchecked().pipe(zero).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); } #[test] fn test_pipe() { let output = sh("echo xxx") .pipe(cmd!(path_to_exe("x_to_y"))) .read() .unwrap(); assert_eq!("yyy", output); // Check that errors on either side are propagated. let result = true_cmd().pipe(false_cmd()).run(); assert!(result.is_err()); let result = false_cmd().pipe(true_cmd()).run(); assert!(result.is_err()); } #[test] fn test_pipe_with_kill() { // Make sure both sides get killed. let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Note that we don't use unchecked() here. This tests that kill suppresses // exit status errors. let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap(); handle.kill().unwrap(); // But calling wait again should be an error, because of the status. handle.wait().unwrap_err(); } #[test] fn test_pipe_start() { let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!")); let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Errors starting the left side of a pipe are returned immediately, and // the right side is never started. nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err(); // Errors starting the right side are also returned immediately, and the // the left side is killed first. sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err(); } #[test] fn test_multiple_threads() { // Wait on the sleep command in a background thread, while the main thread // kills it. 
let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); let handle = Arc::new(sleep_cmd.unchecked().start().unwrap()); let arc_clone = handle.clone(); let wait_thread = std::thread::spawn(move || { arc_clone.wait().unwrap(); }); handle.kill().unwrap(); wait_thread.join().unwrap(); } #[test] fn test_nonblocking_waits() { let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Make sure pipelines handle try_wait correctly. let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap(); // Make sure try_wait doesn't block on it. assert!(handle.try_wait().unwrap().is_none()); handle.kill().unwrap(); } #[test] fn test_input() { let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx"); let output = expr.read().unwrap(); assert_eq!("yyy", output); } #[test] fn test_stderr() { let (mut reader, writer) = ::os_pipe::pipe().unwrap(); sh("echo hi>&2").stderr_file(writer).run().unwrap(); let mut s = String::new(); reader.read_to_string(&mut s).unwrap(); assert_eq!(s.trim(), "hi"); } #[test] fn test_null() { let expr = cmd!(path_to_exe("cat")) .stdin_null() .stdout_null() .stderr_null(); let output = expr.read().unwrap(); assert_eq!("", output); } #[test] fn test_path() { let dir = tempfile::tempdir().unwrap(); let input_file = dir.path().join("input_file"); let output_file = dir.path().join("output_file"); File::create(&input_file) .unwrap() .write_all(b"xxx") .unwrap(); let expr = cmd!(path_to_exe("x_to_y")) .stdin_path(&input_file) .stdout_path(&output_file); let output = expr.read().unwrap(); assert_eq!("", output); let mut file_output = String::new(); File::open(&output_file) .unwrap() .read_to_string(&mut file_output) .unwrap(); assert_eq!("yyy", file_output); } #[test] fn test_swapping() { let output = sh("echo hi") .stdout_to_stderr() .stderr_capture() .run() .unwrap(); let stderr = str::from_utf8(&output.stderr).unwrap().trim(); assert_eq!("hi", stderr); // Windows compatible. (Requires no space before the ">".) let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap(); assert_eq!("hi", output); } #[test] fn test_file() { let dir = tempfile::tempdir().unwrap(); let file = dir.path().join("file"); File::create(&file).unwrap().write_all(b"example").unwrap(); let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap()); let output = expr.read().unwrap(); assert_eq!(output, "example"); } #[test] fn test_ergonomics() { let mystr = "owned string".to_owned(); let mypathbuf = Path::new("a/b/c").to_owned(); let myvec = vec![1, 2, 3]; // These are nonsense expressions. We just want to make sure they compile. let _ = sh("true") .stdin_path(&*mystr) .stdin_bytes(&*myvec) .stdout_path(&*mypathbuf); let _ = sh("true") .stdin_path(mystr) .stdin_bytes(myvec) .stdout_path(mypathbuf); // Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input(). // TODO: Is it worth having these impls for &Vec in other cases? // let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf); } #[test] fn test_capture_both() { // Windows compatible, no space before ">", and we trim newlines at the end to avoid // dealing with the different kinds. let output = sh("echo hi && echo lo>&2") .stdout_capture() .stderr_capture() .run() .unwrap(); assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim()); } #[test] fn test_dir() { // This test checks the interaction of `dir` and relative exe paths. // Make sure that's actually what we're testing. 
let pwd_path = path_to_exe("pwd"); assert!(pwd_path.is_relative()); let pwd = cmd!(pwd_path); // First assert that ordinary commands happen in the parent's dir. let pwd_output = pwd.read().unwrap(); let pwd_path = Path::new(&pwd_output); assert_eq!(pwd_path, env::current_dir().unwrap()); // Now create a temp dir and make sure we can set dir to it. This // also tests the interaction of `dir` and relative exe paths. let dir = tempfile::tempdir().unwrap(); let pwd_output = pwd.dir(dir.path()).read().unwrap(); let pwd_path = Path::new(&pwd_output); // pwd_path isn't totally canonical on Windows, because it // doesn't have a prefix. Thus we have to canonicalize both // sides. (This also handles symlinks in TMP_DIR.) assert_eq!( pwd_path.canonicalize().unwrap(), dir.path().canonicalize().unwrap() ); } #[test] fn test_env() { let output = cmd!(path_to_exe("print_env"), "foo") .env("foo", "bar") .read() .unwrap(); assert_eq!("bar", output); } #[test] fn test_full_env() { // Note that it's important that no other tests use this variable name, // because the test runner is multithreaded. let var_name = "TEST_FULL_ENV"; // Capture the parent env, and make sure it does *not* contain our variable. let clean_env: HashMap<String, String> = env::vars().collect(); assert!( !clean_env.contains_key(var_name), "why is this variable set?" ); // Run a child process with that map passed to full_env(). It should be guaranteed not to // see our variable, regardless of any outer env() calls or changes in the parent. let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env); // Dirty the parent env. Should be suppressed. env::set_var(var_name, "junk1"); // And make an outer env() call. Should also be suppressed. let dirty_child = clean_child.env(var_name, "junk2"); // Check that neither of those have any effect. let output = dirty_child.read().unwrap(); assert_eq!("", output); } #[test] fn test_env_remove() { // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE"; env::set_var(var_name, "junk2"); // Run a command that observes the variable. let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); assert_eq!("junk2", output1); // Run the same command with that variable removed. let output2 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name) .read() .unwrap(); assert_eq!("", output2); } #[test] fn test_env_remove_case_sensitivity() { // Env var deletion is particularly sensitive to the differences in // case-sensitivity between Unix and Windows. The semantics of env_remove // in duct must *match the platform*. // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY"; env::set_var(var_name, "abc123"); // Run a command that tries to clear the same variable, but in lowercase. let output1 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name.to_lowercase()) .read() .unwrap(); // Now try to clear that variable from the parent environment, again using // lowercase, and run the same command without `env_remove`. env::remove_var(var_name.to_lowercase()); let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); // On Unix, env vars are case sensitive, and we don't expect either removal // to have any effect. 
On Windows, they're insensitive, and we expect both // removals to work. The key thing is that both approaches to removal have // the *same effect*. assert_eq!(output1, output2, "failed to match platform behavior!!!"); // Go ahead and assert the exact expected output, just in case. If these // assertions ever break, it might be this test's fault and not the code's. if cfg!(windows) { assert_eq!(output1, ""); } else { assert_eq!(output1, "abc123"); } } #[test] fn test_broken_pipe() { // If the input writing thread fills up its pipe buffer, writing will block. If the process // on the other end of the pipe exits while writer is waiting, the write will return an // error. We need to swallow that error, rather than returning it. let myvec = vec![0; 1_000_000]; true_cmd().stdin_bytes(myvec).run().unwrap(); } #[test] fn test_silly() { // A silly test, purely for coverage. crate::IoValue::Null.try_clone().unwrap(); } #[test] fn test_path_sanitization() { // We don't do any chdir'ing in this process, because the tests runner is multithreaded, // and we don't want to screw up anyone else's relative paths. Instead, we shell out to a // small test process that does that for us. cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0") .run() .unwrap(); } #[test] fn test_before_spawn_hook() { let (reader, mut writer) = os_pipe::pipe().unwrap(); let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| { let reader_clone = reader.try_clone()?; cmd.stdin(reader_clone); Ok(()) }); writer.write_all(b"foobar").unwrap(); drop(writer); let output = expr.read().unwrap(); assert_eq!("foobar", output); } #[test] fn test_trailing_comma() { let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap(); assert_eq!("trailing", output); } #[test] fn test_no_argument() { let output = cmd!(path_to_exe("echo")).read().unwrap(); assert_eq!("", output); } #[test] fn test_dropping_reader() { // Use an explicit stderr pipe to test the ReaderHandle's drop behavior. let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap(); let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000") .stdout_file(stderr_writer) .reader() .unwrap(); // A zero-length read doesn't block. let n = reader_handle.read(&mut []).unwrap(); assert_eq!(n, 0); // Try-wait returns None. let output = reader_handle.try_wait().unwrap(); assert!(output.is_none()); // Now we drop the reader. This kills the child. drop(reader_handle); // Now that the child is killed, reading the stderr pipe will not block. // (Note that our copy was closed when the temporary Expression above // dropped.) let mut stderr = Vec::new(); let n = stderr_reader.read_to_end(&mut stderr).unwrap(); assert_eq!(n, 0); } #[test] fn test_kill_with_grandchild() -> io::Result<()> { // We're going to start a child process, and that child is going to start a // grandchild. The grandchild is going to sleep forever (1 day). We'll read // some output from the child to make sure it's done starting the // grandchild, and then we'll kill the child. Now, the grandchild will not // be killed, and it will still hold a write handle to the stdout pipe. So // this tests that the wait done by kill only waits on the child to exit, // and does not wait on IO to finish. // // This test leaks the grandchild process. I'm sorry. // Capturing stderr means an IO thread is spawned, even though we're using // a ReaderHandle to read stdout. What we're testing here is that kill() // doesn't wait on that IO thread. 
let mut reader = cmd!(path_to_exe("child_grandchild")) .stderr_capture() .reader()?; // Read "started" from the child to make sure we don't kill it before it // starts the grandchild. let mut started_read = [0; 7]; reader.read_exact(&mut started_read)?; assert_eq!(&started_read, b"started"); // Ok, this had better not block! reader.kill() } #[test] fn test_debug_format() { let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong")); assert_eq!( format!("{:?}", e), r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#, ); } #[test] fn test_reader_try_wait() -> io::Result<()> { // Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo // back to us, so that it will block on its stdout pipe until we start // reading. let bytes = vec![42; 1_000_000]; let mut cat_reader = cmd!(path_to_exe("cat")) .stdin_bytes(bytes.clone()) .reader()?; assert!(cat_reader.try_wait()?.is_none()); let mut output = Vec::new(); cat_reader.read_to_end(&mut output)?; assert_eq!(output, bytes); let output = cat_reader.try_wait()?.expect("is some"); assert!(output.status.success()); assert!(output.stdout.is_empty()); assert!(output.stderr.is_empty()); Ok(()) } #[test] fn test_pids() -> io::Result<()> { let handle = true_cmd().start()?; let pids = handle.pids(); assert_eq!(pids.len(), 1); handle.wait()?; let reader = true_cmd().reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 1); std::io::copy(&mut &reader, &mut std::io::sink())?; let handle = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .start()?; let pids = handle.pids(); assert_eq!(pids.len(), 3); handle.wait()?; let reader = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 3); std::io::copy(&mut &reader, &mut std::io::sink())?; Ok(()) }
.stdout_capture() .start() .unwrap();
random_line_split
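The random_line_split sample ending here cuts straight through a builder chain in test_start. The pattern being split, capture stdout, start the child, then wait and inspect its output, reads roughly as follows (a sketch assuming the same cmd! macro and path_to_exe helper defined in the surrounding file):

// Minimal restatement of the capture/start/wait pattern from test_start.
#[test]
fn start_then_wait_sketch() {
    let handle = cmd!(path_to_exe("echo"), "hi")
        .stdout_capture()
        .start()
        .unwrap();
    let output = handle.wait().unwrap();
    assert_eq!("hi", std::str::from_utf8(&output.stdout).unwrap().trim());
}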
test.rs
use super::{cmd, Expression}; use std; use std::collections::HashMap; use std::env; use std::env::consts::EXE_EXTENSION; use std::ffi::OsString; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::Command; use std::str; use std::sync::{Arc, Once}; // Include a copy of the sh function, because we have a lot of old tests that // use it, and it's a lot easier than managing a circular dependency between // duct and duct_sh. pub fn sh(command: &'static str) -> Expression { let argv = shell_command_argv(command.into()); cmd(&argv[0], &argv[1..]) } #[cfg(unix)] fn shell_command_argv(command: OsString) -> Vec<OsString> { vec!["/bin/sh".into(), "-c".into(), command] } #[cfg(windows)] fn shell_command_argv(command: OsString) -> Vec<OsString> { let comspec = std::env::var_os("COMSPEC").unwrap_or_else(|| "cmd.exe".into()); vec![comspec, "/C".into(), command] } pub fn path_to_exe(name: &str) -> PathBuf { // This project defines some associated binaries for testing, and we shell out to them in // these tests. `cargo test` doesn't automatically build associated binaries, so this // function takes care of building them explicitly. static CARGO_BUILD_ONCE: Once = Once::new(); CARGO_BUILD_ONCE.call_once(|| { let build_status = Command::new("cargo") .arg("build") .arg("--quiet") .status() .unwrap(); assert!( build_status.success(), "Cargo failed to build associated binaries." ); }); Path::new("target") .join("debug") .join(name) .with_extension(EXE_EXTENSION) } pub fn true_cmd() -> Expression { cmd!(path_to_exe("status"), "0") } fn false_cmd() -> Expression { cmd!(path_to_exe("status"), "1") } #[test] fn test_cmd() { let output = cmd!(path_to_exe("echo"), "hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_sh() { // Windows compatible. let output = sh("echo hi").read().unwrap(); assert_eq!("hi", output); } #[test] fn test_start() { let handle1 = cmd!(path_to_exe("echo"), "hi") .stdout_capture() .start() .unwrap(); let handle2 = cmd!(path_to_exe("echo"), "lo") .stdout_capture() .start() .unwrap(); let output1 = handle1.wait().unwrap(); let output2 = handle2.wait().unwrap(); assert_eq!("hi", str::from_utf8(&output1.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output2.stdout).unwrap().trim()); } #[test] fn test_error() { let result = false_cmd().run(); if let Err(err) = result { assert_eq!(err.kind(), io::ErrorKind::Other); } else { panic!("Expected a status error."); } } #[test] fn test_unchecked() { let unchecked_false = false_cmd().unchecked(); // Unchecked errors shouldn't cause `run` to return an error. let output = unchecked_false .pipe(cmd!(path_to_exe("echo"), "waa")) .stdout_capture() .run() .unwrap(); // The value of the exit code is preserved. assert_eq!(1, output.status.code().unwrap()); assert_eq!("waa", String::from_utf8_lossy(&output.stdout).trim()); } #[test] fn test_unchecked_in_pipe() { let zero = cmd!(path_to_exe("status"), "0"); let one = cmd!(path_to_exe("status"), "1"); let two = cmd!(path_to_exe("status"), "2"); // Right takes precedence over left. let output = one.pipe(two.clone()).unchecked().run().unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that checked on the left takes precedence over unchecked on // the right. let output = one.pipe(two.unchecked()).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); // Right takes precedence over the left again if they're both unchecked. 
let output = one .unchecked() .pipe(two.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(2, output.status.code().unwrap()); // Except that if the right is a success, the left takes precedence. let output = one .unchecked() .pipe(zero.unchecked()) .unchecked() .run() .unwrap(); assert_eq!(1, output.status.code().unwrap()); // Even if the right is checked. let output = one.unchecked().pipe(zero).unchecked().run().unwrap(); assert_eq!(1, output.status.code().unwrap()); } #[test] fn test_pipe() { let output = sh("echo xxx") .pipe(cmd!(path_to_exe("x_to_y"))) .read() .unwrap(); assert_eq!("yyy", output); // Check that errors on either side are propagated. let result = true_cmd().pipe(false_cmd()).run(); assert!(result.is_err()); let result = false_cmd().pipe(true_cmd()).run(); assert!(result.is_err()); } #[test] fn test_pipe_with_kill()
#[test] fn test_pipe_start() { let nonexistent_cmd = cmd!(path_to_exe("nonexistent!!!")); let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Errors starting the left side of a pipe are returned immediately, and // the right side is never started. nonexistent_cmd.pipe(&sleep_cmd).start().unwrap_err(); // Errors starting the right side are also returned immediately, and the // the left side is killed first. sleep_cmd.pipe(nonexistent_cmd).start().unwrap_err(); } #[test] fn test_multiple_threads() { // Wait on the sleep command in a background thread, while the main thread // kills it. let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); let handle = Arc::new(sleep_cmd.unchecked().start().unwrap()); let arc_clone = handle.clone(); let wait_thread = std::thread::spawn(move || { arc_clone.wait().unwrap(); }); handle.kill().unwrap(); wait_thread.join().unwrap(); } #[test] fn test_nonblocking_waits() { let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000"); // Make sure pipelines handle try_wait correctly. let handle = sleep_cmd.pipe(&sleep_cmd).unchecked().start().unwrap(); // Make sure try_wait doesn't block on it. assert!(handle.try_wait().unwrap().is_none()); handle.kill().unwrap(); } #[test] fn test_input() { let expr = cmd!(path_to_exe("x_to_y")).stdin_bytes("xxx"); let output = expr.read().unwrap(); assert_eq!("yyy", output); } #[test] fn test_stderr() { let (mut reader, writer) = ::os_pipe::pipe().unwrap(); sh("echo hi>&2").stderr_file(writer).run().unwrap(); let mut s = String::new(); reader.read_to_string(&mut s).unwrap(); assert_eq!(s.trim(), "hi"); } #[test] fn test_null() { let expr = cmd!(path_to_exe("cat")) .stdin_null() .stdout_null() .stderr_null(); let output = expr.read().unwrap(); assert_eq!("", output); } #[test] fn test_path() { let dir = tempfile::tempdir().unwrap(); let input_file = dir.path().join("input_file"); let output_file = dir.path().join("output_file"); File::create(&input_file) .unwrap() .write_all(b"xxx") .unwrap(); let expr = cmd!(path_to_exe("x_to_y")) .stdin_path(&input_file) .stdout_path(&output_file); let output = expr.read().unwrap(); assert_eq!("", output); let mut file_output = String::new(); File::open(&output_file) .unwrap() .read_to_string(&mut file_output) .unwrap(); assert_eq!("yyy", file_output); } #[test] fn test_swapping() { let output = sh("echo hi") .stdout_to_stderr() .stderr_capture() .run() .unwrap(); let stderr = str::from_utf8(&output.stderr).unwrap().trim(); assert_eq!("hi", stderr); // Windows compatible. (Requires no space before the ">".) let output = sh("echo hi>&2").stderr_to_stdout().read().unwrap(); assert_eq!("hi", output); } #[test] fn test_file() { let dir = tempfile::tempdir().unwrap(); let file = dir.path().join("file"); File::create(&file).unwrap().write_all(b"example").unwrap(); let expr = cmd!(path_to_exe("cat")).stdin_file(File::open(&file).unwrap()); let output = expr.read().unwrap(); assert_eq!(output, "example"); } #[test] fn test_ergonomics() { let mystr = "owned string".to_owned(); let mypathbuf = Path::new("a/b/c").to_owned(); let myvec = vec![1, 2, 3]; // These are nonsense expressions. We just want to make sure they compile. let _ = sh("true") .stdin_path(&*mystr) .stdin_bytes(&*myvec) .stdout_path(&*mypathbuf); let _ = sh("true") .stdin_path(mystr) .stdin_bytes(myvec) .stdout_path(mypathbuf); // Unfortunately, this one doesn't work with our Into<Vec<u8>> bound on input(). // TODO: Is it worth having these impls for &Vec in other cases? 
// let _ = sh("true").stdin_path(&mystr).stdin_bytes(&myvec).stdout_path(&mypathbuf); } #[test] fn test_capture_both() { // Windows compatible, no space before ">", and we trim newlines at the end to avoid // dealing with the different kinds. let output = sh("echo hi && echo lo>&2") .stdout_capture() .stderr_capture() .run() .unwrap(); assert_eq!("hi", str::from_utf8(&output.stdout).unwrap().trim()); assert_eq!("lo", str::from_utf8(&output.stderr).unwrap().trim()); } #[test] fn test_dir() { // This test checks the interaction of `dir` and relative exe paths. // Make sure that's actually what we're testing. let pwd_path = path_to_exe("pwd"); assert!(pwd_path.is_relative()); let pwd = cmd!(pwd_path); // First assert that ordinary commands happen in the parent's dir. let pwd_output = pwd.read().unwrap(); let pwd_path = Path::new(&pwd_output); assert_eq!(pwd_path, env::current_dir().unwrap()); // Now create a temp dir and make sure we can set dir to it. This // also tests the interaction of `dir` and relative exe paths. let dir = tempfile::tempdir().unwrap(); let pwd_output = pwd.dir(dir.path()).read().unwrap(); let pwd_path = Path::new(&pwd_output); // pwd_path isn't totally canonical on Windows, because it // doesn't have a prefix. Thus we have to canonicalize both // sides. (This also handles symlinks in TMP_DIR.) assert_eq!( pwd_path.canonicalize().unwrap(), dir.path().canonicalize().unwrap() ); } #[test] fn test_env() { let output = cmd!(path_to_exe("print_env"), "foo") .env("foo", "bar") .read() .unwrap(); assert_eq!("bar", output); } #[test] fn test_full_env() { // Note that it's important that no other tests use this variable name, // because the test runner is multithreaded. let var_name = "TEST_FULL_ENV"; // Capture the parent env, and make sure it does *not* contain our variable. let clean_env: HashMap<String, String> = env::vars().collect(); assert!( !clean_env.contains_key(var_name), "why is this variable set?" ); // Run a child process with that map passed to full_env(). It should be guaranteed not to // see our variable, regardless of any outer env() calls or changes in the parent. let clean_child = cmd!(path_to_exe("print_env"), var_name).full_env(clean_env); // Dirty the parent env. Should be suppressed. env::set_var(var_name, "junk1"); // And make an outer env() call. Should also be suppressed. let dirty_child = clean_child.env(var_name, "junk2"); // Check that neither of those have any effect. let output = dirty_child.read().unwrap(); assert_eq!("", output); } #[test] fn test_env_remove() { // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. let var_name = "TEST_ENV_REMOVE"; env::set_var(var_name, "junk2"); // Run a command that observes the variable. let output1 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); assert_eq!("junk2", output1); // Run the same command with that variable removed. let output2 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name) .read() .unwrap(); assert_eq!("", output2); } #[test] fn test_env_remove_case_sensitivity() { // Env var deletion is particularly sensitive to the differences in // case-sensitivity between Unix and Windows. The semantics of env_remove // in duct must *match the platform*. // Set an environment variable in the parent. Note that it's important that // no other tests use this variable name, because the test runner is // multithreaded. 
let var_name = "TEST_ENV_REMOVE_CASE_SENSITIVITY"; env::set_var(var_name, "abc123"); // Run a command that tries to clear the same variable, but in lowercase. let output1 = cmd!(path_to_exe("print_env"), var_name) .env_remove(var_name.to_lowercase()) .read() .unwrap(); // Now try to clear that variable from the parent environment, again using // lowercase, and run the same command without `env_remove`. env::remove_var(var_name.to_lowercase()); let output2 = cmd!(path_to_exe("print_env"), var_name).read().unwrap(); // On Unix, env vars are case sensitive, and we don't expect either removal // to have any effect. On Windows, they're insensitive, and we expect both // removals to work. The key thing is that both approaches to removal have // the *same effect*. assert_eq!(output1, output2, "failed to match platform behavior!!!"); // Go ahead and assert the exact expected output, just in case. If these // assertions ever break, it might be this test's fault and not the code's. if cfg!(windows) { assert_eq!(output1, ""); } else { assert_eq!(output1, "abc123"); } } #[test] fn test_broken_pipe() { // If the input writing thread fills up its pipe buffer, writing will block. If the process // on the other end of the pipe exits while writer is waiting, the write will return an // error. We need to swallow that error, rather than returning it. let myvec = vec![0; 1_000_000]; true_cmd().stdin_bytes(myvec).run().unwrap(); } #[test] fn test_silly() { // A silly test, purely for coverage. crate::IoValue::Null.try_clone().unwrap(); } #[test] fn test_path_sanitization() { // We don't do any chdir'ing in this process, because the tests runner is multithreaded, // and we don't want to screw up anyone else's relative paths. Instead, we shell out to a // small test process that does that for us. cmd!(path_to_exe("exe_in_dir"), path_to_exe("status"), "0") .run() .unwrap(); } #[test] fn test_before_spawn_hook() { let (reader, mut writer) = os_pipe::pipe().unwrap(); let expr = cmd!(path_to_exe("cat")).before_spawn(move |cmd| { let reader_clone = reader.try_clone()?; cmd.stdin(reader_clone); Ok(()) }); writer.write_all(b"foobar").unwrap(); drop(writer); let output = expr.read().unwrap(); assert_eq!("foobar", output); } #[test] fn test_trailing_comma() { let output = cmd!(path_to_exe("echo"), "trailing",).read().unwrap(); assert_eq!("trailing", output); } #[test] fn test_no_argument() { let output = cmd!(path_to_exe("echo")).read().unwrap(); assert_eq!("", output); } #[test] fn test_dropping_reader() { // Use an explicit stderr pipe to test the ReaderHandle's drop behavior. let (mut stderr_reader, stderr_writer) = os_pipe::pipe().unwrap(); let mut reader_handle = cmd!(path_to_exe("sleep"), "1000000") .stdout_file(stderr_writer) .reader() .unwrap(); // A zero-length read doesn't block. let n = reader_handle.read(&mut []).unwrap(); assert_eq!(n, 0); // Try-wait returns None. let output = reader_handle.try_wait().unwrap(); assert!(output.is_none()); // Now we drop the reader. This kills the child. drop(reader_handle); // Now that the child is killed, reading the stderr pipe will not block. // (Note that our copy was closed when the temporary Expression above // dropped.) let mut stderr = Vec::new(); let n = stderr_reader.read_to_end(&mut stderr).unwrap(); assert_eq!(n, 0); } #[test] fn test_kill_with_grandchild() -> io::Result<()> { // We're going to start a child process, and that child is going to start a // grandchild. The grandchild is going to sleep forever (1 day). 
We'll read // some output from the child to make sure it's done starting the // grandchild, and then we'll kill the child. Now, the grandchild will not // be killed, and it will still hold a write handle to the stdout pipe. So // this tests that the wait done by kill only waits on the child to exit, // and does not wait on IO to finish. // // This test leaks the grandchild process. I'm sorry. // Capturing stderr means an IO thread is spawned, even though we're using // a ReaderHandle to read stdout. What we're testing here is that kill() // doesn't wait on that IO thread. let mut reader = cmd!(path_to_exe("child_grandchild")) .stderr_capture() .reader()?; // Read "started" from the child to make sure we don't kill it before it // starts the grandchild. let mut started_read = [0; 7]; reader.read_exact(&mut started_read)?; assert_eq!(&started_read, b"started"); // Ok, this had better not block! reader.kill() } #[test] fn test_debug_format() { let e = cmd!("foo", "bar", "baz").pipe(cmd!("bing", "bong")); assert_eq!( format!("{:?}", e), r#"Pipe(Cmd(["foo", "bar", "baz"]), Cmd(["bing", "bong"]))"#, ); } #[test] fn test_reader_try_wait() -> io::Result<()> { // Create a ReaderHandle for a cat process. Give cat 1 MB of data to echo // back to us, so that it will block on its stdout pipe until we start // reading. let bytes = vec![42; 1_000_000]; let mut cat_reader = cmd!(path_to_exe("cat")) .stdin_bytes(bytes.clone()) .reader()?; assert!(cat_reader.try_wait()?.is_none()); let mut output = Vec::new(); cat_reader.read_to_end(&mut output)?; assert_eq!(output, bytes); let output = cat_reader.try_wait()?.expect("is some"); assert!(output.status.success()); assert!(output.stdout.is_empty()); assert!(output.stderr.is_empty()); Ok(()) } #[test] fn test_pids() -> io::Result<()> { let handle = true_cmd().start()?; let pids = handle.pids(); assert_eq!(pids.len(), 1); handle.wait()?; let reader = true_cmd().reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 1); std::io::copy(&mut &reader, &mut std::io::sink())?; let handle = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .start()?; let pids = handle.pids(); assert_eq!(pids.len(), 3); handle.wait()?; let reader = true_cmd() .pipe(true_cmd().stdout_null().pipe(true_cmd())) .reader()?; let pids = reader.pids(); assert_eq!(pids.len(), 3); std::io::copy(&mut &reader, &mut std::io::sink())?; Ok(()) }
{
    // Make sure both sides get killed.
    let sleep_cmd = cmd!(path_to_exe("sleep"), "1000000");
    // Note that we don't use unchecked() here. This tests that kill suppresses
    // exit status errors.
    let handle = sleep_cmd.pipe(sleep_cmd.clone()).start().unwrap();
    handle.kill().unwrap();
    // But calling wait again should be an error, because of the status.
    handle.wait().unwrap_err();
}
identifier_body
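The identifier_body sample ending here masks the entire body of test_pipe_with_kill, whose comments contrast two sides of the status-handling contract: kill() suppresses the exit-status error, while a later wait() still reports it. A related minimal sketch, grounded in the test_unchecked code visible earlier in the same file, shows the unchecked() side of that contract (helper binaries and cmd! as used elsewhere in the file):

// unchecked() keeps a non-zero exit status from turning run() into an Err,
// while still preserving the code for inspection.
#[test]
fn unchecked_status_sketch() {
    let output = cmd!(path_to_exe("status"), "1")
        .unchecked()
        .run()
        .unwrap();
    assert_eq!(1, output.status.code().unwrap());
}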
writebacker.go
package workqueue import ( "bytes" "context" "encoding/hex" "errors" "time" "github.com/apex/log" "github.com/gocraft/work" "github.com/gomodule/redigo/redis" pkgErrors "github.com/pkg/errors" "gopkg.in/tomb.v2" "git.scc.kit.edu/sdm/lsdf-checksum/internal/lifecycle" "git.scc.kit.edu/sdm/lsdf-checksum/meda" "git.scc.kit.edu/sdm/lsdf-checksum/workqueue" ) var ( ErrFetchedInsufficientFiles = errors.New("database returned insufficient files, some queried files are missing") ErrFetchedUnexpectedFile = errors.New("database returned unexpected file, id not included in query") ErrFetchedDuplicateFile = errors.New("database returned duplicate file") ) //go:generate confions config WriteBackerConfig type WriteBackerConfig struct { Batcher batcherConfig Transactioner transactionerConfig FileSystemName string Namespace string RunID uint64 SnapshotName string Pool *redis.Pool `yaml:"-"` DB *meda.DB `yaml:"-"` Logger log.Interface `yaml:"-"` } var WriteBackerDefaultConfig = &WriteBackerConfig{ Batcher: batcherConfig{ // Complete batch after 500 items (as identified in checksum_write_back // benchmark). MaxItems: 500, // Complete batch 30 s after first item has been added. MaxWaitTime: 30 * time.Second, // Only allow 1 batch to be in-flight. BatchBufferSize: 1, }, Transactioner: transactionerConfig{ // Commit transaction once 20 write queries have been performed (as // identified in checksum_write_back benchmark). MaxTransactionSize: 20, // Commit transaction after 2 minutes regardless of the number of // queries performed. MaxTransactionLifetime: 2 * time.Minute, }, } type WriteBacker struct { Config *WriteBackerConfig tomb *tomb.Tomb batcher *batcher endOfQueueSignal chan struct{} workerPool *work.WorkerPool workerPoolStopped chan struct{} fieldLogger log.Interface } func NewWriteBacker(config *WriteBackerConfig) *WriteBacker { return &WriteBacker{ Config: config, } } func (w *WriteBacker) Start(ctx context.Context) { w.tomb, _ = tomb.WithContext(ctx) w.fieldLogger = w.Config.Logger.WithFields(log.Fields{ "run": w.Config.RunID, "snapshot": w.Config.SnapshotName, "filesystem": w.Config.FileSystemName, "namespace": w.Config.Namespace, "component": "workqueue.WriteBacker", }) w.endOfQueueSignal = make(chan struct{}) w.workerPoolStopped = make(chan struct{}) w.batcher = w.createBatcher() w.workerPool = w.createWorkerPool() w.tomb.Go(func() error { concurrency, err := w.Config.DB.ServerConcurrency() if err != nil { return pkgErrors.Wrap(err, "(*WriteBacker).Start") } w.batcher.Start(w.tomb.Context(nil)) for i := 0; i < concurrency; i++ { w.tomb.Go(w.processor) } w.workerPool.Start() w.tomb.Go(w.endOfQueueHandler) w.tomb.Go(w.batcherManager) w.tomb.Go(w.workerPoolManager) return nil }) } func (w *WriteBacker) SignalEndOfQueue() { close(w.endOfQueueSignal) } func (w *WriteBacker) SignalStop() { w.tomb.Kill(lifecycle.ErrStopSignalled) } func (w *WriteBacker) Wait() error { return w.tomb.Wait() } func (w *WriteBacker)
() <-chan struct{} { return w.tomb.Dead() } func (w *WriteBacker) Err() error { return w.tomb.Err() } func (w *WriteBacker) endOfQueueHandler() error { select { case <-w.endOfQueueSignal: w.fieldLogger.Info("Received end-of-queue signal") w.fieldLogger.Debug("Draining and stopping worker pool as end-of-queue is reached") w.workerPool.Drain() w.workerPool.Stop() close(w.workerPoolStopped) w.fieldLogger.Debug("Closing batcher as end-of-queue is reached") err := w.batcher.Close(w.tomb.Context(nil)) if err != nil { err = pkgErrors.Wrap(err, "(*WriteBacker).endOfQueueHandler") w.fieldLogger.WithError(err).WithFields(log.Fields{ "action": "stopping", }).Error("Encountered error while closing batcher") return err } return nil case <-w.tomb.Dying(): return tomb.ErrDying } } func (w *WriteBacker) workerPoolManager() error { select { case <-w.workerPoolStopped: // There is no way of receiving errors from the worker pool return nil case <-w.tomb.Dying(): w.fieldLogger.Debug("Stopping and waiting for worker pool as component is dying") w.workerPool.Stop() return tomb.ErrDying } } func (w *WriteBacker) batcherManager() error { select { case <-w.batcher.Dead(): err := w.batcher.Err() if err == lifecycle.ErrStopSignalled { return nil } else if err != nil { err = pkgErrors.Wrap(err, "(*WriteBacker).batcherManager") w.fieldLogger.WithError(err).WithFields(log.Fields{ "action": "stopping", }).Error("Batcher died") return err } return nil case <-w.tomb.Dying(): w.fieldLogger.Debug("Waiting for batcher to finish as component is dying") _ = w.batcher.Wait() return tomb.ErrDying } } func (w *WriteBacker) createBatcher() *batcher { config := &batcherConfig{} *config = w.Config.Batcher return newBatcher(config) } func (w *WriteBacker) createWorkerPool() *work.WorkerPool { workerPool := work.NewWorkerPool(writeBackerContext{}, 1, w.Config.Namespace, w.Config.Pool) workerPool.Middleware( func(wBCtx *writeBackerContext, job *work.Job, next work.NextMiddlewareFunc) error { wBCtx.WriteBacker = w return next() }, ) jobName := workqueue.WriteBackJobName(w.Config.FileSystemName, w.Config.SnapshotName) workerPool.Job(jobName, (*writeBackerContext).Process) return workerPool } func (w *WriteBacker) createTransactioner() *transactioner { config := &transactionerConfig{} *config = w.Config.Transactioner config.DB = w.Config.DB return newTransactioner(w.tomb.Context(nil), config) } func (w *WriteBacker) processor() error { ctx := w.tomb.Context(nil) transactioner := w.createTransactioner() batchChan := w.batcher.Out() dying := w.tomb.Dying() for { select { case batch, ok := <-batchChan: if !ok { transactioner.Commit() return nil } err := w.processBatch(ctx, batch, transactioner) batch.Return() if err != nil { err = pkgErrors.Wrap(err, "(*WriteBacker).processor") w.fieldLogger.WithError(err).WithFields(log.Fields{ "action": "stopping", }).Error("Encountered error while processing batch") transactioner.Close() return err } case <-dying: transactioner.Close() return tomb.ErrDying } } } func (w *WriteBacker) processBatch(ctx context.Context, batch *filesBatch, transactioner *transactioner) error { w.fieldLogger.Debug("Starting processing of batch") checksums, fileIDs := w.collectFilesInBatch(batch) // TODO pool files var files []meda.File files, err := transactioner.AppendFilesByIDs(files, ctx, fileIDs) if err != nil { return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: fetch files from database") } for i := range files { // Pointer to file in files, don't copy file := &files[i] checksum, ok := checksums[file.ID] if !ok { 
return pkgErrors.Wrapf(ErrFetchedUnexpectedFile, "(*WriteBacker).processBatch: process file with id %d", file.ID) } else if checksum == nil { return pkgErrors.Wrapf(ErrFetchedDuplicateFile, "(*WriteBacker).processBatch: process file with id %d", file.ID) } checksums[file.ID] = nil if file.Checksum != nil && file.ToBeCompared == 1 && !bytes.Equal(checksum, file.Checksum) { // info logging is handled by issueChecksumWarning err = w.issueChecksumWarning(ctx, file, checksum, transactioner) if err != nil { // error logging is handled by issueChecksumWarning (escalating) return pkgErrors.Wrapf(err, "(*WriteBacker).processBatch: process file with id %d", file.ID) } } file.Checksum = checksum file.LastRead.Uint64, file.LastRead.Valid = w.Config.RunID, true file.ToBeCompared = 0 // ToBeRead is set in a separate loop } // Check that all files in the batch have been processed for _, checksum := range checksums { if checksum != nil { return pkgErrors.Wrap(ErrFetchedInsufficientFiles, "(*WriteBacker).processBatch: check all files processed") } } // Set ToBeRead to 0 and drop files, which have already been written for i := 0; i < len(files); { if files[i].ToBeRead != 1 { files[i] = files[len(files)-1] files = files[:len(files)-1] } else { files[i].ToBeRead = 0 i++ } } err = transactioner.UpdateFilesChecksums(ctx, files, w.Config.RunID) if err != nil { return pkgErrors.Wrap(err, "(*WriteBacker).processBatch: update files in database") } w.fieldLogger.Debug("Finished processing of batch") return nil } func (w *WriteBacker) collectFilesInBatch(batch *filesBatch) (map[uint64][]byte, []uint64) { checksums := make(map[uint64][]byte) // TODO use pool for fileIDs fileIDs := make([]uint64, 0, len(batch.Files)) for i := range batch.Files { file := &batch.Files[i] if checksum, ok := checksums[file.ID]; ok { w.fieldLogger.WithFields(log.Fields{ "action": "skipping", "file_id": file.ID, "first_checksum": checksum, "subsequent_checksum": file.Checksum, }).Warn("Received same file multiple times in job batch, dropping all but first encounter") continue } checksums[file.ID] = file.Checksum fileIDs = append(fileIDs, file.ID) } return checksums, fileIDs } func (w *WriteBacker) issueChecksumWarning(ctx context.Context, file *meda.File, checksum []byte, transactioner *transactioner) error { fieldLogger := w.fieldLogger.WithFields(log.Fields{ "file_id": file.ID, "file_path": file.Path, "file_modification_time": file.ModificationTime, "file_size": file.FileSize, "expected_checksum": hex.EncodeToString(file.Checksum), "actual_checksum": hex.EncodeToString(checksum), "file_last_read": file.LastRead.Uint64, }) fieldLogger.Info("Discovered checksum mismatch, writing checksum warning") checksumWarning := meda.ChecksumWarning{ FileID: file.ID, Path: file.Path, ModificationTime: file.ModificationTime, FileSize: file.FileSize, ExpectedChecksum: file.Checksum, ActualChecksum: checksum, Discovered: w.Config.RunID, LastRead: file.LastRead.Uint64, Created: meda.Time(time.Now()), } err := transactioner.InsertChecksumWarning(ctx, &checksumWarning) if err != nil { err = pkgErrors.Wrap(err, "(*WriteBacker).issueChecksumWarning") fieldLogger.WithError(err).WithFields(log.Fields{ "action": "escalating", }).Error("Encountered error while issuing checksum warning") } return nil } type writeBackerContext struct { WriteBacker *WriteBacker } func (w *writeBackerContext) Process(job *work.Job) error { ctx := w.WriteBacker.tomb.Context(nil) // TODO pool writeBackPack := workqueue.WriteBackPack{} err := writeBackPack.FromJobArgs(job.Args) if err != 
nil { err = pkgErrors.Wrap(err, "(*writeBackerContext).Process: unmarshal WriteBackPack from job") w.WriteBacker.fieldLogger.WithError(err).WithFields(log.Fields{ "action": "stopping", "args": job.Args, "job_name": job.Name, }).Error("Encountered error while unmarshaling WriteBackPack from job") w.WriteBacker.tomb.Kill(err) // return nil as to not re-queue the job return nil } w.WriteBacker.fieldLogger.WithFields(log.Fields{ "write_back_pack": writeBackPack, }).Debug("Received and unmarshaled WriteBackPack") for i := range writeBackPack.Files { // Pointer to file in files, don't copy file := &writeBackPack.Files[i] err := w.WriteBacker.batcher.Add(ctx, file) if err != nil { err = pkgErrors.Wrap(err, "(*writeBackerContext).Process: add received file to batcher") w.WriteBacker.fieldLogger.WithError(err).WithFields(log.Fields{ "action": "stopping", "file_id": file.ID, "file_checksum": file.Checksum, }).Error("Encountered error while adding received file to batcher") w.WriteBacker.tomb.Kill(err) // return nil as to not re-queue the job return nil } } return nil }
Dead
identifier_name
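The WriteBacker above is driven through a small lifecycle API: NewWriteBacker, Start, SignalEndOfQueue, SignalStop, Wait, Dead and Err. The sketch below shows one plausible way to wire it up; it is not taken from the repository. It assumes it compiles as part of the same package (so the unexported batcher and transactioner config types resolve), and every concrete value such as the file system name, namespace, run ID and snapshot name is a placeholder.

package workqueue

import (
	"context"

	"github.com/apex/log"
	"github.com/gomodule/redigo/redis"

	"git.scc.kit.edu/sdm/lsdf-checksum/meda"
)

// runWriteBackerSketch is a hypothetical helper illustrating the lifecycle of a
// WriteBacker. The pool, db and logger are assumed to be set up by the caller.
func runWriteBackerSketch(ctx context.Context, pool *redis.Pool, db *meda.DB, logger log.Interface) error {
	// Start from the documented defaults (batches of 500 items or 30 s,
	// transactions of at most 20 write queries or 2 minutes).
	config := *WriteBackerDefaultConfig
	config.FileSystemName = "gpfs0"          // placeholder file system name
	config.Namespace = "lsdf-checksum"       // placeholder work queue namespace
	config.RunID = 42                        // placeholder run id
	config.SnapshotName = "snapshot-example" // placeholder snapshot name
	config.Pool = pool
	config.DB = db
	config.Logger = logger

	wb := NewWriteBacker(&config)
	wb.Start(ctx)

	// Once the producer knows that no further write-back jobs will be enqueued,
	// it signals end-of-queue; the WriteBacker then drains its worker pool,
	// closes the batcher and commits any remaining transactions.
	wb.SignalEndOfQueue()

	// Wait blocks until all processors and managers have returned and reports
	// the first error, if any.
	return wb.Wait()
}

Copying WriteBackerDefaultConfig keeps the benchmark-derived batching and transaction settings while only the run-specific fields are filled in.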
find_panics.py
#!/usr/bin/env python3

# Licensed under the Apache License, Version 2.0 or the MIT License.
# SPDX-License-Identifier: Apache-2.0 OR MIT
# Copyright Tock Contributors 2023.

# Prints out the source locations of panics in a Tock kernel ELF
#
# This tool attempts to trace all panic locations in a Tock kernel ELF by
# tracing calls to panic functions in the core library, using the debug information
# embedded in the ELF file. This tool requires an ELF which includes debug information.
# In its current state, it cannot accurately provide the source locations
# corresponding to each panic, but tries to be honest about its confidence in
# each guess. In general, each guess is usually enough to locate the relevant panic.
# More creative analysis might be able to increase
# the accuracy with which this tool can identify source locations of panics. For now,
# this tool is useful for:
#
# - obtaining a rough count of the number of panics in a Tock kernel binary
#
# - finding and removing panics in a Tock kernel binary
#
# - roughly determining which components of a Tock kernel binary contain the most panic
#   paths
#
# There are several assumptions built into this tool which may not always hold. For one,
# the list of panic_functions is assumed not to match any strings in the actual
# codebase, despite the fact that they are incomplete function names and overlap is possible.
# I could solve this by using full names of these functions, but I am unsure how often
# the name mangling of these functions will change as the Rust compiler changes, so this
# approach felt potentially more stable.
#
# Several assumptions are made about DWARF locations that do not always hold, so source
# locations are not always accurate -- sometimes, the printed location just points to
# the function containing a panic, rather than the actual line on which the panic
# occurs. Some assumptions about which panics are in the core library and will be
# caught by grepping for other calls may also not always hold. The best way to inspect
# these is by manually inspecting the panics in the `within_core_panic_list`.
#
# This script stores panics which it cannot trace out of the core library in the
# `no_info_panic_list`. If this list contains some panics, that is a sign that some
# panics have not been identified. You can manually look at the addresses stored in
# this list, attempt to find the core library function which leads to these instructions
# being called, and then add those core library functions to the list of panic functions.
#
# The output of this script is *not* stable.
#
# Usage: find_panics.py ELF [--riscv]
#
# Requires Python 3.7+
#
# Author: Hudson Ayers <[email protected]>

import argparse
import platform
import re
import subprocess
import sys

if platform.system() == 'Darwin':
    DWARFDUMP = "dwarfdump"
elif platform.system() == 'Linux':
    DWARFDUMP = "llvm-dwarfdump"
else:
    raise NotImplementedError("Unknown platform")

# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump
ARM_OBJDUMP = "arm-none-eabi-objdump"
RISCV_OBJDUMP = "riscv64-unknown-elf-objdump"

# TODO: For all functions below the initial batch, it would likely be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
panic_functions = [ "expect_failed", "unwrap_failed", "panic_bounds_check", "slice_index_order_fail", "slice_end_index_len_fail", "slice_start_index_len_fail", "slice17len_mismatch_fail", "str16slice_error_fail", "copy_from_slice17len_mismatch_fail", "copy_from_slice17", "panicking5panic", # below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold "6unwrap17", "6expect17", "11copy_within17", "core..fmt..builders..PadAdapter", # calls slice_error_fail "11copy_within17", # calls panicking::panic "write_char", # calls PadAdapter one above "write_str", # calls write_char "printable5check", # calls slice_index_order_fail "char$u20$as$u20$core..fmt..Debug", # calls printable5check "GenericRadix7fmt_int", # calls slice_start_index_len_fail # below are functions I manually traced on an arm binary, # with a somewhat higher inline threshold. "10unwrap_err17h6", "13is_whitespace17", "$u20$core..slice..index..SliceIndex$LT", "core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter", "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE", "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE", ] # Pre-compiled regex lookups dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""") dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""") line_info_re = re.compile(r""".*Line info.*""") abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""") dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""") dw_at_name_re = re.compile(r""".*DW_AT_name.*""") def matches_panic_funcs(name): """If the passed name contains one of the known panic_functions, return the match """ for func in panic_functions: if func in name: return func return "" def linkage_or_origin_all_parents(elf, addr, linkage=False):
def any_origin_matches_panic_func(elf, addr): """returns name if any origin for the passed addr matches one of the functions in the panic_functions array """ origins = linkage_or_origin_all_parents(elf, addr) for origin in origins: name = matches_panic_funcs(origin) if name: return name return "" def any_linkage_matches_panic_func(elf, addr): """returns True + name if any linkage for the passed addr matches one of the functions in the panic_functions array """ linkages = linkage_or_origin_all_parents(elf, addr, True) for linkage in linkages: name = matches_panic_funcs(linkage) if name: return name return "" def check_for_source_in_parent(elf, addr): """Takes in a dwarfdump lookup including parents of the source DWARF location, returns the first parent with a call file not in the core library. If found, this often indicates the source of the panic in the Tock source code. """ result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True ) dwarfdump = result.stdout matches = re.findall(dw_at_file_re, dwarfdump) def getFile(line): return line.strip().split('"')[1] source_files = list(map(getFile, matches)) for (i, f) in enumerate(source_files[::-1]): if "/core/" not in f: line_matches = re.findall(dw_at_line_re, dwarfdump) def getLine(line): return line.strip().split("(")[1].split(")")[0] source_lines = list(map(getLine, line_matches)) source_line = source_lines[::-1][i] return (f, source_line) return ("", "") def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("ELF", help="ELF file for analysis") parser.add_argument( "--verbose", "-v", action="store_true", help="Output additional DWARF info for each panic location in the binary", ) parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump") return parser.parse_args() # Find all addresses that panic, and get basic dwarf info on those addresses def find_all_panics(objdump, elf, is_riscv): panic_list = [] within_core_panic_list = [] no_info_panic_list = [] result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True) objdump_out = result.stdout for function in panic_functions: function_re = re.compile(".*:.*#.*" + function + ".*") if not is_riscv: # Arm-none-eabi-objdump uses ';' for comments instead of '#' function_re = re.compile(".*:.*<.*" + function + ".*") # TODO: arm elfs include loads of offsets from symbols in such a way that these lines # are matched by this regex. In general, these loads occur within the instruction stream # associated with the symbol at hand, and will usually be excluded by logic later in # this function. This leads to `within_core_panic_list` and `no_info_panic_list` # containing more "panics" than when analyzing a risc-v binary. We could fix this # by matching *only* on functions with instructions that actually jump to a new symbol, # but this would require a list of such instructions for each architecture. However # as written it actually lets us identify panics which are jumped to via addresses # stored in registers, which may actually catch additional valid panics. 
matches = re.findall(function_re, objdump_out) def getAddr(line): return line.strip().split(":")[0] addrs = list(map(getAddr, matches)) for addr in addrs: result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True ) dwarfdump = result.stdout dw_at_file = re.search(dw_at_file_re, dwarfdump) dw_at_line = re.search(dw_at_line_re, dwarfdump) line_info = re.search(line_info_re, dwarfdump) abstract_origin = re.search(abstract_origin_re, dwarfdump) linkage_name = re.search(dw_at_linkage_name_re, dwarfdump) file_string = "" line_string = "" line_info_string = "" abstract_origin_string = "" linkage_name_string = "" if dw_at_file: file_string = dw_at_file.group(0).strip() line_string = dw_at_line.group(0).strip() panicinfo = {} panicinfo["addr"] = addr panicinfo["function"] = function if line_info: line_info_string = line_info.group(0).strip() panicinfo["line_info"] = line_info_string if abstract_origin: abstract_origin_string = abstract_origin.group(0).strip() if linkage_name: linkage_name_string = linkage_name.group(0).strip() if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string: raise RuntimeError("I misunderstand DWARF") if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string: filename = file_string.split('"')[1] line_num = line_string.split("(")[1].split(")")[0] if "DW_AT_call_file" in file_string: panicinfo["call_file"] = filename panicinfo["call_line"] = line_num if "DW_AT_decl_file" in file_string: panicinfo["decl_file"] = filename panicinfo["decl_line"] = line_num if not "/core/" in filename: if not "closure" in abstract_origin_string: panicinfo["best_guess_source"] = "call/decl" else: panicinfo["best_guess_source"] = "call-closure-line-info" panic_list.append(panicinfo) continue else: # 'core' in filename (parent_file, parent_line) = check_for_source_in_parent(elf, addr) if parent_file: panicinfo["parent_call_file"] = parent_file panicinfo["parent_call_line"] = parent_line panicinfo["best_guess_source"] = "parent" panic_list.append(panicinfo) continue elif not abstract_origin and not linkage_name: no_info_panic_list.append(panicinfo) continue elif abstract_origin: if "core" in abstract_origin_string: name = matches_panic_funcs(abstract_origin_string) if name: within_core_panic_list.append(panicinfo) continue else: name2 = any_origin_matches_panic_func(elf, addr) name3 = any_linkage_matches_panic_func(elf, addr) if name2: within_core_panic_list.append(panicinfo) continue elif name3: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) continue elif "closure" in abstract_origin_string: # not in core, in closure, line info is probably sufficient panicinfo["best_guess_source"] = "lineinfo" panic_list.append(panicinfo) continue else: # i have not seen this happen -- core in file, not closure, origin not core raise RuntimeError("Unhandled") if linkage_name: name = matches_panic_funcs(linkage_name_string) if name: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) print( "Failed to match panic but we probably have enough info to trace it up. 
Linkage name: {}, addr: {}".format( linkage_name_string, addr ) ) continue no_info_panic_list.append(panic_info) print("did not find source for panic: {}".format(addr)) continue elif abstract_origin: origin = abstract_origin_string.split('"')[1] panicinfo["abstract_origin"] = origin if "core" in origin: if matches_panic_funcs(origin): within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) print( "Probably could add this origin or one of its parents to the panic function list: {}".format( abstract_origin_string ) ) continue else: panicinfo["best_guess_source"] = "abstract_origin + line" panic_list.append(panicinfo) continue else: # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM try: dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[ -1 ].strip() # see multiple matches for this string sometimes function_name = dw_at_name_string.split('"')[1] if "OUTLINED_FUNCTION_" in function_name: # This is a common pattern where panicing paths are repeated in many # places throughout the binary, and LLVMs optimizer outlines the repeated code. # Let's add these to the list of panicing functions, dynamically so this is resilient to # changes in the binary. if function_name not in panic_functions: # don't double insert panic_functions.append( function_name + ">" ) # so FUNCTION_22 does not catch FUNCTION_222 within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) continue except: # There seem to be a places where lookup fails completely # Not easy to recover, log these and continue on. no_info_panic_list.append(panicinfo) continue raise RuntimeError("BUG: Should not reach here") return (panic_list, within_core_panic_list, no_info_panic_list) def pretty_print(panicinfo): if panicinfo["best_guess_source"] == "call/decl": try: print( "\t{} -- {}:{}".format( panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"] ) ) except: print( "\t{} -- in function starting at {}:{}".format( panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"] ) ) elif panicinfo["best_guess_source"] == "parent": print( "\t{} -- at or in function starting at {}:{}".format( panicinfo["addr"], panicinfo["parent_call_file"], panicinfo["parent_call_line"], ) ) elif panicinfo["best_guess_source"] == "lineinfo": print( "\t{} -- in closure, try: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) elif panicinfo["best_guess_source"] == "abstract_origin + line": print( "\t{} -- line_info: {} from origin :{}".format( panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"] ) ) elif panicinfo["best_guess_source"] == "call-closure-line-info": print( "\t{} -- in closure starting on line_info: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) else: raise RuntimeError("Missing best guess source: {}".format(panicinfo)) def main(): args = parse_args() if sys.version_info.minor < 7: print("This tool requires Python 3.7+") return -1 print("Tock panic report for " + args.ELF) objdump = ARM_OBJDUMP if args.riscv: objdump = RISCV_OBJDUMP (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics( objdump, args.ELF, args.riscv ) print("num_panics: {}".format(len(panic_list))) buckets_list = {} for f in panic_functions: buckets_list[f] = [] for panic in panic_list: buckets_list[panic["function"]].append(panic) for f, l in buckets_list.items(): if len(l) > 0: print("{}: {}".format(f, len(l))) for p in l: pretty_print(p) if args.verbose: print(p) print() print("num panics in core ignored: 
{}".format(len(within_core_panic_list))) print("num panics for which no info available: {}".format(len(no_info_panic_list))) if args.verbose: print( "If more debug info is needed, run dwarfdump directly on the address in question." ) if __name__ == "__main__": main()
"""Returns a list of the abstract origin or linkage of all parents of the dwarf location for the passed address """ result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True ) dwarfdump = result.stdout regex = abstract_origin_re if linkage: regex = dw_at_linkage_name_re matches = re.findall(regex, dwarfdump) def getFunction(line): return line.strip().split('"')[1] origins = list(map(getFunction, matches)) return origins
identifier_body
matches = re.findall(function_re, objdump_out) def getAddr(line): return line.strip().split(":")[0] addrs = list(map(getAddr, matches)) for addr in addrs: result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True ) dwarfdump = result.stdout dw_at_file = re.search(dw_at_file_re, dwarfdump) dw_at_line = re.search(dw_at_line_re, dwarfdump) line_info = re.search(line_info_re, dwarfdump) abstract_origin = re.search(abstract_origin_re, dwarfdump) linkage_name = re.search(dw_at_linkage_name_re, dwarfdump) file_string = "" line_string = "" line_info_string = "" abstract_origin_string = "" linkage_name_string = "" if dw_at_file: file_string = dw_at_file.group(0).strip() line_string = dw_at_line.group(0).strip() panicinfo = {} panicinfo["addr"] = addr panicinfo["function"] = function if line_info: line_info_string = line_info.group(0).strip() panicinfo["line_info"] = line_info_string if abstract_origin: abstract_origin_string = abstract_origin.group(0).strip() if linkage_name: linkage_name_string = linkage_name.group(0).strip() if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string: raise RuntimeError("I misunderstand DWARF") if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string: filename = file_string.split('"')[1] line_num = line_string.split("(")[1].split(")")[0] if "DW_AT_call_file" in file_string: panicinfo["call_file"] = filename panicinfo["call_line"] = line_num if "DW_AT_decl_file" in file_string: panicinfo["decl_file"] = filename panicinfo["decl_line"] = line_num if not "/core/" in filename: if not "closure" in abstract_origin_string: panicinfo["best_guess_source"] = "call/decl" else: panicinfo["best_guess_source"] = "call-closure-line-info" panic_list.append(panicinfo) continue else: # 'core' in filename (parent_file, parent_line) = check_for_source_in_parent(elf, addr) if parent_file: panicinfo["parent_call_file"] = parent_file panicinfo["parent_call_line"] = parent_line panicinfo["best_guess_source"] = "parent" panic_list.append(panicinfo) continue elif not abstract_origin and not linkage_name: no_info_panic_list.append(panicinfo) continue elif abstract_origin: if "core" in abstract_origin_string: name = matches_panic_funcs(abstract_origin_string) if name: within_core_panic_list.append(panicinfo) continue else: name2 = any_origin_matches_panic_func(elf, addr) name3 = any_linkage_matches_panic_func(elf, addr) if name2: within_core_panic_list.append(panicinfo) continue elif name3: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) continue elif "closure" in abstract_origin_string: # not in core, in closure, line info is probably sufficient panicinfo["best_guess_source"] = "lineinfo" panic_list.append(panicinfo) continue else: # i have not seen this happen -- core in file, not closure, origin not core
if linkage_name: name = matches_panic_funcs(linkage_name_string) if name: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) print( "Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}".format( linkage_name_string, addr ) ) continue no_info_panic_list.append(panic_info) print("did not find source for panic: {}".format(addr)) continue elif abstract_origin: origin = abstract_origin_string.split('"')[1] panicinfo["abstract_origin"] = origin if "core" in origin: if matches_panic_funcs(origin): within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) print( "Probably could add this origin or one of its parents to the panic function list: {}".format( abstract_origin_string ) ) continue else: panicinfo["best_guess_source"] = "abstract_origin + line" panic_list.append(panicinfo) continue else: # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM try: dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[ -1 ].strip() # see multiple matches for this string sometimes function_name = dw_at_name_string.split('"')[1] if "OUTLINED_FUNCTION_" in function_name: # This is a common pattern where panicing paths are repeated in many # places throughout the binary, and LLVMs optimizer outlines the repeated code. # Let's add these to the list of panicing functions, dynamically so this is resilient to # changes in the binary. if function_name not in panic_functions: # don't double insert panic_functions.append( function_name + ">" ) # so FUNCTION_22 does not catch FUNCTION_222 within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) continue except: # There seem to be a places where lookup fails completely # Not easy to recover, log these and continue on. 
no_info_panic_list.append(panicinfo) continue raise RuntimeError("BUG: Should not reach here") return (panic_list, within_core_panic_list, no_info_panic_list) def pretty_print(panicinfo): if panicinfo["best_guess_source"] == "call/decl": try: print( "\t{} -- {}:{}".format( panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"] ) ) except: print( "\t{} -- in function starting at {}:{}".format( panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"] ) ) elif panicinfo["best_guess_source"] == "parent": print( "\t{} -- at or in function starting at {}:{}".format( panicinfo["addr"], panicinfo["parent_call_file"], panicinfo["parent_call_line"], ) ) elif panicinfo["best_guess_source"] == "lineinfo": print( "\t{} -- in closure, try: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) elif panicinfo["best_guess_source"] == "abstract_origin + line": print( "\t{} -- line_info: {} from origin :{}".format( panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"] ) ) elif panicinfo["best_guess_source"] == "call-closure-line-info": print( "\t{} -- in closure starting on line_info: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) else: raise RuntimeError("Missing best guess source: {}".format(panicinfo)) def main(): args = parse_args() if sys.version_info.minor < 7: print("This tool requires Python 3.7+") return -1 print("Tock panic report for " + args.ELF) objdump = ARM_OBJDUMP if args.riscv: objdump = RISCV_OBJDUMP (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics( objdump, args.ELF, args.riscv ) print("num_panics: {}".format(len(panic_list))) buckets_list = {} for f in panic_functions: buckets_list[f] = [] for panic in panic_list: buckets_list[panic["function"]].append(panic) for f, l in buckets_list.items(): if len(l) > 0: print("{}: {}".format(f, len(l))) for p in l: pretty_print(p) if args.verbose: print(p) print() print("num panics in core ignored: {}".format(len(within_core_panic_list))) print("num panics for which no info available: {}".format(len(no_info_panic_list))) if args.verbose: print( "If more debug info is needed, run dwarfdump directly on the address in question." ) if __name__ == "__main__": main()
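# A purely hypothetical illustration (not real captured output) of the report shape
# main() produces; addresses, paths, and counts below are invented:
#   Tock panic report for kernel.elf
#   num_panics: 2
#   panic_bounds_check: 2
#       4006a2 -- /tock/kernel/src/process.rs:810
#       4007f0 -- at or in function starting at /tock/capsules/src/console.rs:142
#
#   num panics in core ignored: 5
#   num panics for which no info available: 1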
raise RuntimeError("Unhandled")
conditional_block
find_panics.py
#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 or the MIT License. # SPDX-License-Identifier: Apache-2.0 OR MIT # Copyright Tock Contributors 2023. # Prints out the source locations of panics in a Tock kernel ELF # # This tool attempts to trace all panic locations in a Tock kernel ELF by # tracing calls to panic functions in the core library, using the debug information # embedded in the ELF file. This tool requires an ELF which includes debug information. # In its current state, cannot accurately provide the source locations # corresponding to each panic, but tries to be honest about its confidence in # each guess. In general, each guess is usually enough to locate the relevant panic. # More creative analysis might be able to increase # the accuracy with which this tool can identify source locations of panics. For now, # this tool is useful for: # # - obtaining a rough count of the number of panics in a Tock kernel binary # # - finding and removing panics in a Tock kernel binary # # - roughly determining which components of a Tock kernel binary contain the most panic # paths # # There are several assumptions built into this tool which may not always hold. For one, # the list of panic_functions are assumed to not match any strings in the actual # codebase, despite the fact they are incomplete function names and overlap is possible. # I could solve this by using full names of these functions, but I am unsure how often # the name mangling of these functions will change as the rust compiler changes so this # approach felt potentially more stable. # # Several assumptions are made about DWARF locations that do not always hold, so source # locations are not always accurate -- sometimes, the printed location just points to # the function containing a panic, rather than the actual line on which the panic # occurs. Some assumptions about which panics are in the core library and will be # caught by grepping for other calls may also not always hold. The best way to inspect # these is by manually inspecting the panics in the `within_core_panic_list`. # # This script stores panics which it cannot trace out of the core library in the # `no_info_panic_list`. If this list contains some panics, that is a sign that some # panics have not been identified. You can manually look at the addresses stored in # this list, attempt to find the core library function which leads to these instrucitons # being called, and then add those core library functions to the list of panic functions. # # The output of this script is *not* stable. # # Usage: find_panics.py ELF [--riscv] # # Requires Python 3.7+ # # Author: Hudson Ayers <[email protected]> import argparse import platform import re import subprocess import sys if platform.system() == 'Darwin': DWARFDUMP = "dwarfdump" elif platform.system() == 'Linux': DWARFDUMP = "llvm-dwarfdump" else: raise NotImplementedError("Unknown platform") # Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump ARM_OBJDUMP = "arm-none-eabi-objdump" RISCV_OBJDUMP = "riscv64-unknown-elf-objdump" # TODO: For all functions below the initial batch, it would like be preferable to # automatically populate the list with additional functions in the core library using # debug info. For now, however, I do this manually. 
panic_functions = [ "expect_failed", "unwrap_failed", "panic_bounds_check", "slice_index_order_fail", "slice_end_index_len_fail", "slice_start_index_len_fail", "slice17len_mismatch_fail", "str16slice_error_fail", "copy_from_slice17len_mismatch_fail", "copy_from_slice17", "panicking5panic", # below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold "6unwrap17", "6expect17", "11copy_within17", "core..fmt..builders..PadAdapter", # calls slice_error_fail "11copy_within17", # calls panicking::panic "write_char", # calls PadAdapter one above "write_str", # calls write_char "printable5check", # calls slice_index_order_fail "char$u20$as$u20$core..fmt..Debug", # calls printable5check "GenericRadix7fmt_int", # calls slice_start_index_len_fail # below are functions I manually traced on an arm binary, # with a somewhat higher inline threshold. "10unwrap_err17h6", "13is_whitespace17", "$u20$core..slice..index..SliceIndex$LT", "core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter", "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE", "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE", ] # Pre-compiled regex lookups dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""") dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""") line_info_re = re.compile(r""".*Line info.*""") abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""") dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""") dw_at_name_re = re.compile(r""".*DW_AT_name.*""") def matches_panic_funcs(name): """If the passed name contains one of the known panic_functions, return the match """ for func in panic_functions: if func in name: return func return "" def
(elf, addr, linkage=False): """Returns a list of the abstract origin or linkage of all parents of the dwarf location for the passed address """ result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True ) dwarfdump = result.stdout regex = abstract_origin_re if linkage: regex = dw_at_linkage_name_re matches = re.findall(regex, dwarfdump) def getFunction(line): return line.strip().split('"')[1] origins = list(map(getFunction, matches)) return origins def any_origin_matches_panic_func(elf, addr): """returns name if any origin for the passed addr matches one of the functions in the panic_functions array """ origins = linkage_or_origin_all_parents(elf, addr) for origin in origins: name = matches_panic_funcs(origin) if name: return name return "" def any_linkage_matches_panic_func(elf, addr): """returns True + name if any linkage for the passed addr matches one of the functions in the panic_functions array """ linkages = linkage_or_origin_all_parents(elf, addr, True) for linkage in linkages: name = matches_panic_funcs(linkage) if name: return name return "" def check_for_source_in_parent(elf, addr): """Takes in a dwarfdump lookup including parents of the source DWARF location, returns the first parent with a call file not in the core library. If found, this often indicates the source of the panic in the Tock source code. """ result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True ) dwarfdump = result.stdout matches = re.findall(dw_at_file_re, dwarfdump) def getFile(line): return line.strip().split('"')[1] source_files = list(map(getFile, matches)) for (i, f) in enumerate(source_files[::-1]): if "/core/" not in f: line_matches = re.findall(dw_at_line_re, dwarfdump) def getLine(line): return line.strip().split("(")[1].split(")")[0] source_lines = list(map(getLine, line_matches)) source_line = source_lines[::-1][i] return (f, source_line) return ("", "") def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("ELF", help="ELF file for analysis") parser.add_argument( "--verbose", "-v", action="store_true", help="Output additional DWARF info for each panic location in the binary", ) parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump") return parser.parse_args() # Find all addresses that panic, and get basic dwarf info on those addresses def find_all_panics(objdump, elf, is_riscv): panic_list = [] within_core_panic_list = [] no_info_panic_list = [] result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True) objdump_out = result.stdout for function in panic_functions: function_re = re.compile(".*:.*#.*" + function + ".*") if not is_riscv: # Arm-none-eabi-objdump uses ';' for comments instead of '#' function_re = re.compile(".*:.*<.*" + function + ".*") # TODO: arm elfs include loads of offsets from symbols in such a way that these lines # are matched by this regex. In general, these loads occur within the instruction stream # associated with the symbol at hand, and will usually be excluded by logic later in # this function. This leads to `within_core_panic_list` and `no_info_panic_list` # containing more "panics" than when analyzing a risc-v binary. We could fix this # by matching *only* on functions with instructions that actually jump to a new symbol, # but this would require a list of such instructions for each architecture. 
However # as written it actually lets us identify panics which are jumped to via addresses # stored in registers, which may actually catch additional valid panics. matches = re.findall(function_re, objdump_out) def getAddr(line): return line.strip().split(":")[0] addrs = list(map(getAddr, matches)) for addr in addrs: result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True ) dwarfdump = result.stdout dw_at_file = re.search(dw_at_file_re, dwarfdump) dw_at_line = re.search(dw_at_line_re, dwarfdump) line_info = re.search(line_info_re, dwarfdump) abstract_origin = re.search(abstract_origin_re, dwarfdump) linkage_name = re.search(dw_at_linkage_name_re, dwarfdump) file_string = "" line_string = "" line_info_string = "" abstract_origin_string = "" linkage_name_string = "" if dw_at_file: file_string = dw_at_file.group(0).strip() line_string = dw_at_line.group(0).strip() panicinfo = {} panicinfo["addr"] = addr panicinfo["function"] = function if line_info: line_info_string = line_info.group(0).strip() panicinfo["line_info"] = line_info_string if abstract_origin: abstract_origin_string = abstract_origin.group(0).strip() if linkage_name: linkage_name_string = linkage_name.group(0).strip() if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string: raise RuntimeError("I misunderstand DWARF") if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string: filename = file_string.split('"')[1] line_num = line_string.split("(")[1].split(")")[0] if "DW_AT_call_file" in file_string: panicinfo["call_file"] = filename panicinfo["call_line"] = line_num if "DW_AT_decl_file" in file_string: panicinfo["decl_file"] = filename panicinfo["decl_line"] = line_num if not "/core/" in filename: if not "closure" in abstract_origin_string: panicinfo["best_guess_source"] = "call/decl" else: panicinfo["best_guess_source"] = "call-closure-line-info" panic_list.append(panicinfo) continue else: # 'core' in filename (parent_file, parent_line) = check_for_source_in_parent(elf, addr) if parent_file: panicinfo["parent_call_file"] = parent_file panicinfo["parent_call_line"] = parent_line panicinfo["best_guess_source"] = "parent" panic_list.append(panicinfo) continue elif not abstract_origin and not linkage_name: no_info_panic_list.append(panicinfo) continue elif abstract_origin: if "core" in abstract_origin_string: name = matches_panic_funcs(abstract_origin_string) if name: within_core_panic_list.append(panicinfo) continue else: name2 = any_origin_matches_panic_func(elf, addr) name3 = any_linkage_matches_panic_func(elf, addr) if name2: within_core_panic_list.append(panicinfo) continue elif name3: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) continue elif "closure" in abstract_origin_string: # not in core, in closure, line info is probably sufficient panicinfo["best_guess_source"] = "lineinfo" panic_list.append(panicinfo) continue else: # i have not seen this happen -- core in file, not closure, origin not core raise RuntimeError("Unhandled") if linkage_name: name = matches_panic_funcs(linkage_name_string) if name: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) print( "Failed to match panic but we probably have enough info to trace it up. 
Linkage name: {}, addr: {}".format( linkage_name_string, addr ) ) continue no_info_panic_list.append(panic_info) print("did not find source for panic: {}".format(addr)) continue elif abstract_origin: origin = abstract_origin_string.split('"')[1] panicinfo["abstract_origin"] = origin if "core" in origin: if matches_panic_funcs(origin): within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) print( "Probably could add this origin or one of its parents to the panic function list: {}".format( abstract_origin_string ) ) continue else: panicinfo["best_guess_source"] = "abstract_origin + line" panic_list.append(panicinfo) continue else: # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM try: dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[ -1 ].strip() # see multiple matches for this string sometimes function_name = dw_at_name_string.split('"')[1] if "OUTLINED_FUNCTION_" in function_name: # This is a common pattern where panicing paths are repeated in many # places throughout the binary, and LLVMs optimizer outlines the repeated code. # Let's add these to the list of panicing functions, dynamically so this is resilient to # changes in the binary. if function_name not in panic_functions: # don't double insert panic_functions.append( function_name + ">" ) # so FUNCTION_22 does not catch FUNCTION_222 within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) continue except: # There seem to be a places where lookup fails completely # Not easy to recover, log these and continue on. no_info_panic_list.append(panicinfo) continue raise RuntimeError("BUG: Should not reach here") return (panic_list, within_core_panic_list, no_info_panic_list) def pretty_print(panicinfo): if panicinfo["best_guess_source"] == "call/decl": try: print( "\t{} -- {}:{}".format( panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"] ) ) except: print( "\t{} -- in function starting at {}:{}".format( panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"] ) ) elif panicinfo["best_guess_source"] == "parent": print( "\t{} -- at or in function starting at {}:{}".format( panicinfo["addr"], panicinfo["parent_call_file"], panicinfo["parent_call_line"], ) ) elif panicinfo["best_guess_source"] == "lineinfo": print( "\t{} -- in closure, try: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) elif panicinfo["best_guess_source"] == "abstract_origin + line": print( "\t{} -- line_info: {} from origin :{}".format( panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"] ) ) elif panicinfo["best_guess_source"] == "call-closure-line-info": print( "\t{} -- in closure starting on line_info: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) else: raise RuntimeError("Missing best guess source: {}".format(panicinfo)) def main(): args = parse_args() if sys.version_info.minor < 7: print("This tool requires Python 3.7+") return -1 print("Tock panic report for " + args.ELF) objdump = ARM_OBJDUMP if args.riscv: objdump = RISCV_OBJDUMP (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics( objdump, args.ELF, args.riscv ) print("num_panics: {}".format(len(panic_list))) buckets_list = {} for f in panic_functions: buckets_list[f] = [] for panic in panic_list: buckets_list[panic["function"]].append(panic) for f, l in buckets_list.items(): if len(l) > 0: print("{}: {}".format(f, len(l))) for p in l: pretty_print(p) if args.verbose: print(p) print() print("num panics in core ignored: 
{}".format(len(within_core_panic_list))) print("num panics for which no info available: {}".format(len(no_info_panic_list))) if args.verbose: print( "If more debug info is needed, run dwarfdump directly on the address in question." ) if __name__ == "__main__": main()
linkage_or_origin_all_parents
identifier_name
find_panics.py
#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 or the MIT License. # SPDX-License-Identifier: Apache-2.0 OR MIT # Copyright Tock Contributors 2023. # Prints out the source locations of panics in a Tock kernel ELF # # This tool attempts to trace all panic locations in a Tock kernel ELF by # tracing calls to panic functions in the core library, using the debug information # embedded in the ELF file. This tool requires an ELF which includes debug information. # In its current state, cannot accurately provide the source locations # corresponding to each panic, but tries to be honest about its confidence in # each guess. In general, each guess is usually enough to locate the relevant panic. # More creative analysis might be able to increase # the accuracy with which this tool can identify source locations of panics. For now, # this tool is useful for: # # - obtaining a rough count of the number of panics in a Tock kernel binary # # - finding and removing panics in a Tock kernel binary # # - roughly determining which components of a Tock kernel binary contain the most panic # paths # # There are several assumptions built into this tool which may not always hold. For one, # the list of panic_functions are assumed to not match any strings in the actual # codebase, despite the fact they are incomplete function names and overlap is possible. # I could solve this by using full names of these functions, but I am unsure how often # the name mangling of these functions will change as the rust compiler changes so this # approach felt potentially more stable. # # Several assumptions are made about DWARF locations that do not always hold, so source # locations are not always accurate -- sometimes, the printed location just points to # the function containing a panic, rather than the actual line on which the panic # occurs. Some assumptions about which panics are in the core library and will be # caught by grepping for other calls may also not always hold. The best way to inspect # these is by manually inspecting the panics in the `within_core_panic_list`. # # This script stores panics which it cannot trace out of the core library in the # `no_info_panic_list`. If this list contains some panics, that is a sign that some # panics have not been identified. You can manually look at the addresses stored in # this list, attempt to find the core library function which leads to these instrucitons # being called, and then add those core library functions to the list of panic functions. # # The output of this script is *not* stable. # # Usage: find_panics.py ELF [--riscv] # # Requires Python 3.7+ # # Author: Hudson Ayers <[email protected]>
import re import subprocess import sys if platform.system() == 'Darwin': DWARFDUMP = "dwarfdump" elif platform.system() == 'Linux': DWARFDUMP = "llvm-dwarfdump" else: raise NotImplementedError("Unknown platform") # Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump ARM_OBJDUMP = "arm-none-eabi-objdump" RISCV_OBJDUMP = "riscv64-unknown-elf-objdump" # TODO: For all functions below the initial batch, it would like be preferable to # automatically populate the list with additional functions in the core library using # debug info. For now, however, I do this manually. panic_functions = [ "expect_failed", "unwrap_failed", "panic_bounds_check", "slice_index_order_fail", "slice_end_index_len_fail", "slice_start_index_len_fail", "slice17len_mismatch_fail", "str16slice_error_fail", "copy_from_slice17len_mismatch_fail", "copy_from_slice17", "panicking5panic", # below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold "6unwrap17", "6expect17", "11copy_within17", "core..fmt..builders..PadAdapter", # calls slice_error_fail "11copy_within17", # calls panicking::panic "write_char", # calls PadAdapter one above "write_str", # calls write_char "printable5check", # calls slice_index_order_fail "char$u20$as$u20$core..fmt..Debug", # calls printable5check "GenericRadix7fmt_int", # calls slice_start_index_len_fail # below are functions I manually traced on an arm binary, # with a somewhat higher inline threshold. "10unwrap_err17h6", "13is_whitespace17", "$u20$core..slice..index..SliceIndex$LT", "core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter", "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE", "_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE", ] # Pre-compiled regex lookups dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""") dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""") line_info_re = re.compile(r""".*Line info.*""") abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""") dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""") dw_at_name_re = re.compile(r""".*DW_AT_name.*""") def matches_panic_funcs(name): """If the passed name contains one of the known panic_functions, return the match """ for func in panic_functions: if func in name: return func return "" def linkage_or_origin_all_parents(elf, addr, linkage=False): """Returns a list of the abstract origin or linkage of all parents of the dwarf location for the passed address """ result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True ) dwarfdump = result.stdout regex = abstract_origin_re if linkage: regex = dw_at_linkage_name_re matches = re.findall(regex, dwarfdump) def getFunction(line): return line.strip().split('"')[1] origins = list(map(getFunction, matches)) return origins def any_origin_matches_panic_func(elf, addr): """returns name if any origin for the passed addr matches one of the functions in the panic_functions array """ origins = linkage_or_origin_all_parents(elf, addr) for origin in origins: name = matches_panic_funcs(origin) if name: return name return "" def any_linkage_matches_panic_func(elf, addr): """returns True + name if any linkage for the passed addr matches one of the functions in the panic_functions array """ linkages = 
linkage_or_origin_all_parents(elf, addr, True) for linkage in linkages: name = matches_panic_funcs(linkage) if name: return name return "" def check_for_source_in_parent(elf, addr): """Takes in a dwarfdump lookup including parents of the source DWARF location, returns the first parent with a call file not in the core library. If found, this often indicates the source of the panic in the Tock source code. """ result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True ) dwarfdump = result.stdout matches = re.findall(dw_at_file_re, dwarfdump) def getFile(line): return line.strip().split('"')[1] source_files = list(map(getFile, matches)) for (i, f) in enumerate(source_files[::-1]): if "/core/" not in f: line_matches = re.findall(dw_at_line_re, dwarfdump) def getLine(line): return line.strip().split("(")[1].split(")")[0] source_lines = list(map(getLine, line_matches)) source_line = source_lines[::-1][i] return (f, source_line) return ("", "") def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("ELF", help="ELF file for analysis") parser.add_argument( "--verbose", "-v", action="store_true", help="Output additional DWARF info for each panic location in the binary", ) parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump") return parser.parse_args() # Find all addresses that panic, and get basic dwarf info on those addresses def find_all_panics(objdump, elf, is_riscv): panic_list = [] within_core_panic_list = [] no_info_panic_list = [] result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True) objdump_out = result.stdout for function in panic_functions: function_re = re.compile(".*:.*#.*" + function + ".*") if not is_riscv: # Arm-none-eabi-objdump uses ';' for comments instead of '#' function_re = re.compile(".*:.*<.*" + function + ".*") # TODO: arm elfs include loads of offsets from symbols in such a way that these lines # are matched by this regex. In general, these loads occur within the instruction stream # associated with the symbol at hand, and will usually be excluded by logic later in # this function. This leads to `within_core_panic_list` and `no_info_panic_list` # containing more "panics" than when analyzing a risc-v binary. We could fix this # by matching *only* on functions with instructions that actually jump to a new symbol, # but this would require a list of such instructions for each architecture. However # as written it actually lets us identify panics which are jumped to via addresses # stored in registers, which may actually catch additional valid panics. 
matches = re.findall(function_re, objdump_out) def getAddr(line): return line.strip().split(":")[0] addrs = list(map(getAddr, matches)) for addr in addrs: result = subprocess.run( (DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True ) dwarfdump = result.stdout dw_at_file = re.search(dw_at_file_re, dwarfdump) dw_at_line = re.search(dw_at_line_re, dwarfdump) line_info = re.search(line_info_re, dwarfdump) abstract_origin = re.search(abstract_origin_re, dwarfdump) linkage_name = re.search(dw_at_linkage_name_re, dwarfdump) file_string = "" line_string = "" line_info_string = "" abstract_origin_string = "" linkage_name_string = "" if dw_at_file: file_string = dw_at_file.group(0).strip() line_string = dw_at_line.group(0).strip() panicinfo = {} panicinfo["addr"] = addr panicinfo["function"] = function if line_info: line_info_string = line_info.group(0).strip() panicinfo["line_info"] = line_info_string if abstract_origin: abstract_origin_string = abstract_origin.group(0).strip() if linkage_name: linkage_name_string = linkage_name.group(0).strip() if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string: raise RuntimeError("I misunderstand DWARF") if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string: filename = file_string.split('"')[1] line_num = line_string.split("(")[1].split(")")[0] if "DW_AT_call_file" in file_string: panicinfo["call_file"] = filename panicinfo["call_line"] = line_num if "DW_AT_decl_file" in file_string: panicinfo["decl_file"] = filename panicinfo["decl_line"] = line_num if not "/core/" in filename: if not "closure" in abstract_origin_string: panicinfo["best_guess_source"] = "call/decl" else: panicinfo["best_guess_source"] = "call-closure-line-info" panic_list.append(panicinfo) continue else: # 'core' in filename (parent_file, parent_line) = check_for_source_in_parent(elf, addr) if parent_file: panicinfo["parent_call_file"] = parent_file panicinfo["parent_call_line"] = parent_line panicinfo["best_guess_source"] = "parent" panic_list.append(panicinfo) continue elif not abstract_origin and not linkage_name: no_info_panic_list.append(panicinfo) continue elif abstract_origin: if "core" in abstract_origin_string: name = matches_panic_funcs(abstract_origin_string) if name: within_core_panic_list.append(panicinfo) continue else: name2 = any_origin_matches_panic_func(elf, addr) name3 = any_linkage_matches_panic_func(elf, addr) if name2: within_core_panic_list.append(panicinfo) continue elif name3: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) continue elif "closure" in abstract_origin_string: # not in core, in closure, line info is probably sufficient panicinfo["best_guess_source"] = "lineinfo" panic_list.append(panicinfo) continue else: # i have not seen this happen -- core in file, not closure, origin not core raise RuntimeError("Unhandled") if linkage_name: name = matches_panic_funcs(linkage_name_string) if name: within_core_panic_list.append(panicinfo) continue else: no_info_panic_list.append(panicinfo) print( "Failed to match panic but we probably have enough info to trace it up. 
Linkage name: {}, addr: {}".format( linkage_name_string, addr ) ) continue no_info_panic_list.append(panic_info) print("did not find source for panic: {}".format(addr)) continue elif abstract_origin: origin = abstract_origin_string.split('"')[1] panicinfo["abstract_origin"] = origin if "core" in origin: if matches_panic_funcs(origin): within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) print( "Probably could add this origin or one of its parents to the panic function list: {}".format( abstract_origin_string ) ) continue else: panicinfo["best_guess_source"] = "abstract_origin + line" panic_list.append(panicinfo) continue else: # This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM try: dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[ -1 ].strip() # see multiple matches for this string sometimes function_name = dw_at_name_string.split('"')[1] if "OUTLINED_FUNCTION_" in function_name: # This is a common pattern where panicing paths are repeated in many # places throughout the binary, and LLVMs optimizer outlines the repeated code. # Let's add these to the list of panicing functions, dynamically so this is resilient to # changes in the binary. if function_name not in panic_functions: # don't double insert panic_functions.append( function_name + ">" ) # so FUNCTION_22 does not catch FUNCTION_222 within_core_panic_list.append(panicinfo) continue no_info_panic_list.append(panicinfo) continue except: # There seem to be a places where lookup fails completely # Not easy to recover, log these and continue on. no_info_panic_list.append(panicinfo) continue raise RuntimeError("BUG: Should not reach here") return (panic_list, within_core_panic_list, no_info_panic_list) def pretty_print(panicinfo): if panicinfo["best_guess_source"] == "call/decl": try: print( "\t{} -- {}:{}".format( panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"] ) ) except: print( "\t{} -- in function starting at {}:{}".format( panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"] ) ) elif panicinfo["best_guess_source"] == "parent": print( "\t{} -- at or in function starting at {}:{}".format( panicinfo["addr"], panicinfo["parent_call_file"], panicinfo["parent_call_line"], ) ) elif panicinfo["best_guess_source"] == "lineinfo": print( "\t{} -- in closure, try: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) elif panicinfo["best_guess_source"] == "abstract_origin + line": print( "\t{} -- line_info: {} from origin :{}".format( panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"] ) ) elif panicinfo["best_guess_source"] == "call-closure-line-info": print( "\t{} -- in closure starting on line_info: {}".format( panicinfo["addr"], panicinfo["line_info"] ) ) else: raise RuntimeError("Missing best guess source: {}".format(panicinfo)) def main(): args = parse_args() if sys.version_info.minor < 7: print("This tool requires Python 3.7+") return -1 print("Tock panic report for " + args.ELF) objdump = ARM_OBJDUMP if args.riscv: objdump = RISCV_OBJDUMP (panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics( objdump, args.ELF, args.riscv ) print("num_panics: {}".format(len(panic_list))) buckets_list = {} for f in panic_functions: buckets_list[f] = [] for panic in panic_list: buckets_list[panic["function"]].append(panic) for f, l in buckets_list.items(): if len(l) > 0: print("{}: {}".format(f, len(l))) for p in l: pretty_print(p) if args.verbose: print(p) print() print("num panics in core ignored: 
{}".format(len(within_core_panic_list))) print("num panics for which no info available: {}".format(len(no_info_panic_list))) if args.verbose: print( "If more debug info is needed, run dwarfdump directly on the address in question." ) if __name__ == "__main__": main()
import argparse import platform
random_line_split
topics_and_partitions.go
package kgo import ( "sync" "sync/atomic" "github.com/twmb/franz-go/pkg/kerr" ) func newTopicPartitions() *topicPartitions { parts := new(topicPartitions) parts.v.Store(new(topicPartitionsData)) return parts } // Contains all information about a topic's partitions. type topicPartitions struct { v atomic.Value // *topicPartitionsData partsMu sync.Mutex partitioner TopicPartitioner } func (t *topicPartitions) load() *topicPartitionsData { return t.v.Load().(*topicPartitionsData) } var noTopicsPartitions = newTopicsPartitions() func newTopicsPartitions() *topicsPartitions { var t topicsPartitions t.v.Store(make(topicsPartitionsData)) return &t } // A helper type mapping topics to their partitions; // this is the inner value of topicPartitions.v. type topicsPartitionsData map[string]*topicPartitions func (d topicsPartitionsData) hasTopic(t string) bool { _, exists := d[t]; return exists } func (d topicsPartitionsData) loadTopic(t string) *topicPartitionsData
// A helper type mapping topics to their partitions that can be updated // atomically. type topicsPartitions struct { v atomic.Value // topicsPartitionsData (map[string]*topicPartitions) } func (t *topicsPartitions) load() topicsPartitionsData { if t == nil { return nil } return t.v.Load().(topicsPartitionsData) } func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) } func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) } func (t *topicsPartitions) clone() topicsPartitionsData { current := t.load() clone := make(map[string]*topicPartitions, len(current)) for k, v := range current { clone[k] = v } return clone } // Ensures that the topics exist in the returned map, but does not store the // update. This can be used to update the data and store later, rather than // storing immediately. func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData { var cloned bool current := t.load() for _, topic := range topics { if _, exists := current[topic]; !exists { if !cloned { current = t.clone() cloned = true } current[topic] = newTopicPartitions() } } return current } // Updates the topic partitions data atomic value. // // If this is the first time seeing partitions, we do processing of unknown // partitions that may be buffered for producing. func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) { // If the topic already had partitions, then there would be no // unknown topic waiting and we do not need to notify anything. if hadPartitions { l.v.Store(lv) return } p := &cl.producer p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() // If the topic did not have partitions, then we need to store the // partition update BEFORE unlocking the mutex to guard against this // sequence of events: // // - unlock waiters // - delete waiter // - new produce recreates waiter // - we store update // - we never notify the recreated waiter // // By storing before releasing the locks, we ensure that later // partition loads for this topic under the mu will see our update. defer l.v.Store(lv) // If there are no unknown topics or this topic is not unknown, then we // have nothing to do. if len(p.unknownTopics) == 0 { return } unknown, exists := p.unknownTopics[topic] if !exists { return } // If we loaded no partitions because of a retriable error, we signal // the waiting goroutine that a try happened. It is possible the // goroutine is quitting and will not be draining unknownWait, so we do // not require the send. if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) { select { case unknown.wait <- lv.loadErr: default: } return } // Either we have a fatal error or we can successfully partition. // // Even with a fatal error, if we loaded any partitions, we partition. // If we only had a fatal error, we can finish promises in a goroutine. // If we are partitioning, we have to do it under the unknownMu to // ensure prior buffered records are produced in order before we // release the mu. 
delete(p.unknownTopics, topic) close(unknown.wait) // allow waiting goroutine to quit if len(lv.partitions) == 0 { cl.failUnknownTopicRecords(topic, unknown, lv.loadErr) } else { for _, pr := range unknown.buffered { cl.doPartitionRecord(l, lv, pr) } } } // If a metadata request fails after retrying (internally retrying, so only a // few times), or the metadata request does not return topics that we requested // (which may also happen additionally consuming via regex), then we need to // bump errors for topics that were previously loaded, and bump errors for // topics awaiting load. // // This has two modes of operation: // // 1) if no topics were missing, then the metadata request failed outright, // and we need to bump errors on all stored topics and unknown topics. // // 2) if topics were missing, then the metadata request was successful but // had missing data, and we need to bump errors on only what was mising. // func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) { p := &cl.producer // mode 1 if len(missingTopics) == 0 { for _, topic := range requested { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } // mode 2 var missing map[string]bool for _, failTopic := range missingTopics { if missing == nil { missing = make(map[string]bool, len(missingTopics)) } missing[failTopic] = true if topic, exists := requested[failTopic]; exists { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() for topic, unknown := range p.unknownTopics { // if nil, mode 1, else mode 2 if missing != nil && !missing[topic] { continue } select { case unknown.wait <- err: default: } } } // topicPartitionsData is the data behind a topicPartitions' v. // // We keep this in an atomic because it is expected to be extremely read heavy, // and if it were behind a lock, the lock would need to be held for a while. type topicPartitionsData struct { // NOTE if adding anything to this struct, be sure to fix meta merge. loadErr error // could be auth, unknown, leader not avail, or creation err isInternal bool partitions []*topicPartition // partition num => partition writablePartitions []*topicPartition // subset of above } // topicPartition contains all information from Kafka for a topic's partition, // as well as what a client is producing to it or info about consuming from it. type topicPartition struct { // If we have a load error (leader/listener/replica not available), we // keep the old topicPartition data and the new error. loadErr error // If we do not have a load error, we determine if the new // topicPartition is the same or different from the old based on // whether the data changed (leader or leader epoch, etc.). topicPartitionData // If we do not have a load error, we copy the records and cursor // pointers from the old after updating any necessary fields in them // (see migrate functions below). // // Only one of records or cursor is non-nil. records *recBuf cursor *cursor } // Contains stuff that changes on metadata update that we copy into a cursor or // recBuf. type topicPartitionData struct { // Our leader; if metadata sees this change, the metadata update // migrates the cursor to a different source with the session stopped, // and the recBuf to a different sink under a tight mutex. leader int32 // What we believe to be the epoch of the leader for this partition. 
// // For cursors, for KIP-320, if a broker receives a fetch request where // the current leader epoch does not match the brokers, either the // broker is behind and returns UnknownLeaderEpoch, or we are behind // and the broker returns FencedLeaderEpoch. For the former, we back // off and retry. For the latter, we update our metadata. leaderEpoch int32 } // migrateProductionTo is called on metadata update if a topic partition's sink // has changed. This moves record production from one sink to the other; this // must be done such that records produced during migration follow those // already buffered. func (old *topicPartition) migrateProductionTo(new *topicPartition) { // First, remove our record buffer from the old sink. old.records.sink.removeRecBuf(old.records) // Before this next lock, record producing will buffer to the // in-migration-progress records and may trigger draining to // the old sink. That is fine, the old sink no longer consumes // from these records. We just have wasted drain triggers. old.records.mu.Lock() // guard setting sink and topic partition data old.records.sink = new.records.sink old.records.topicPartitionData = new.topicPartitionData old.records.mu.Unlock() // After the unlock above, record buffering can trigger drains // on the new sink, which is not yet consuming from these // records. Again, just more wasted drain triggers. old.records.sink.addRecBuf(old.records) // add our record source to the new sink // At this point, the new sink will be draining our records. We lastly // need to copy the records pointer to our new topicPartition. new.records = old.records } // migrateCursorTo is called on metadata update if a topic partition's leader // or leader epoch has changed. // // This is a little bit different from above, in that we do this logic only // after stopping a consumer session. With the consumer session stopped, we // have fewer concurrency issues to worry about. func (old *topicPartition) migrateCursorTo( new *topicPartition, consumer *consumer, consumerSessionStopped *bool, reloadOffsets *listOrEpochLoads, tpsPrior **topicsPartitions, ) { // Migrating a cursor requires stopping any consumer session. If we // stop a session, we need to eventually re-start any offset listing or // epoch loading that was stopped. Thus, we simply merge what we // stopped into what we will reload. if !*consumerSessionStopped { loads, tps := consumer.stopSession() reloadOffsets.mergeFrom(loads) *tpsPrior = tps *consumerSessionStopped = true } old.cursor.source.removeCursor(old.cursor) // With the session stopped, we can update fields on the old cursor // with no concurrency issue. old.cursor.source = new.cursor.source // KIP-320: if we had consumed some messages, we need to validate the // leader epoch on the new broker to see if we experienced data loss // before we can use this cursor. if new.leaderEpoch != -1 && old.cursor.lastConsumedEpoch >= 0 { // Since the cursor consumed messages, it is definitely usable. // We use it so that the epoch load can finish using it // properly. old.cursor.use() reloadOffsets.addLoad(old.cursor.topic, old.cursor.partition, loadTypeEpoch, offsetLoad{ replica: -1, Offset: Offset{ at: old.cursor.offset, epoch: old.cursor.lastConsumedEpoch, }, }) } old.cursor.topicPartitionData = new.topicPartitionData old.cursor.source.addCursor(old.cursor) new.cursor = old.cursor }
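// A minimal standalone sketch (not part of kgo) of the copy-on-write pattern used by
// topicsPartitions above: readers load a map from an atomic.Value without locking,
// while writers clone the map, mutate the clone, and store it back. Writers are
// assumed to be serialized externally (kgo holds a mutex around metadata updates).
package main

import (
	"fmt"
	"sync/atomic"
)

type cowMap struct{ v atomic.Value } // always holds a map[string]int

func newCowMap() *cowMap {
	m := new(cowMap)
	m.v.Store(make(map[string]int))
	return m
}

// load is cheap and lock-free; suitable for the read-heavy paths described above.
func (m *cowMap) load() map[string]int { return m.v.Load().(map[string]int) }

// set clones the current map, applies the update, and atomically publishes the clone.
func (m *cowMap) set(k string, val int) {
	cur := m.load()
	clone := make(map[string]int, len(cur)+1)
	for ck, cv := range cur {
		clone[ck] = cv
	}
	clone[k] = val
	m.v.Store(clone)
}

func main() {
	m := newCowMap()
	m.set("topic-a", 3)
	fmt.Println(m.load()["topic-a"]) // prints 3
}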
{ tp, exists := d[t] if !exists { return nil } return tp.load() }
identifier_body
topics_and_partitions.go
package kgo import ( "sync" "sync/atomic" "github.com/twmb/franz-go/pkg/kerr" ) func newTopicPartitions() *topicPartitions { parts := new(topicPartitions) parts.v.Store(new(topicPartitionsData)) return parts } // Contains all information about a topic's partitions. type topicPartitions struct { v atomic.Value // *topicPartitionsData partsMu sync.Mutex partitioner TopicPartitioner } func (t *topicPartitions) load() *topicPartitionsData { return t.v.Load().(*topicPartitionsData) } var noTopicsPartitions = newTopicsPartitions() func newTopicsPartitions() *topicsPartitions { var t topicsPartitions t.v.Store(make(topicsPartitionsData)) return &t } // A helper type mapping topics to their partitions; // this is the inner value of topicPartitions.v. type topicsPartitionsData map[string]*topicPartitions func (d topicsPartitionsData) hasTopic(t string) bool { _, exists := d[t]; return exists } func (d topicsPartitionsData) loadTopic(t string) *topicPartitionsData { tp, exists := d[t] if !exists { return nil } return tp.load() } // A helper type mapping topics to their partitions that can be updated // atomically. type topicsPartitions struct { v atomic.Value // topicsPartitionsData (map[string]*topicPartitions) } func (t *topicsPartitions) load() topicsPartitionsData { if t == nil { return nil } return t.v.Load().(topicsPartitionsData) } func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) } func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) } func (t *topicsPartitions) clone() topicsPartitionsData { current := t.load() clone := make(map[string]*topicPartitions, len(current)) for k, v := range current { clone[k] = v } return clone } // Ensures that the topics exist in the returned map, but does not store the // update. This can be used to update the data and store later, rather than // storing immediately. func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData { var cloned bool current := t.load() for _, topic := range topics { if _, exists := current[topic]; !exists { if !cloned { current = t.clone() cloned = true } current[topic] = newTopicPartitions() } } return current } // Updates the topic partitions data atomic value. // // If this is the first time seeing partitions, we do processing of unknown // partitions that may be buffered for producing. func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) { // If the topic already had partitions, then there would be no // unknown topic waiting and we do not need to notify anything. if hadPartitions { l.v.Store(lv) return } p := &cl.producer p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() // If the topic did not have partitions, then we need to store the // partition update BEFORE unlocking the mutex to guard against this // sequence of events: // // - unlock waiters // - delete waiter // - new produce recreates waiter // - we store update // - we never notify the recreated waiter // // By storing before releasing the locks, we ensure that later // partition loads for this topic under the mu will see our update. defer l.v.Store(lv) // If there are no unknown topics or this topic is not unknown, then we // have nothing to do. if len(p.unknownTopics) == 0 { return } unknown, exists := p.unknownTopics[topic] if !exists { return } // If we loaded no partitions because of a retriable error, we signal // the waiting goroutine that a try happened. 
It is possible the // goroutine is quitting and will not be draining unknownWait, so we do // not require the send. if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) { select { case unknown.wait <- lv.loadErr: default: } return } // Either we have a fatal error or we can successfully partition. // // Even with a fatal error, if we loaded any partitions, we partition. // If we only had a fatal error, we can finish promises in a goroutine. // If we are partitioning, we have to do it under the unknownMu to // ensure prior buffered records are produced in order before we // release the mu. delete(p.unknownTopics, topic) close(unknown.wait) // allow waiting goroutine to quit if len(lv.partitions) == 0 { cl.failUnknownTopicRecords(topic, unknown, lv.loadErr) } else { for _, pr := range unknown.buffered { cl.doPartitionRecord(l, lv, pr) } } } // If a metadata request fails after retrying (internally retrying, so only a // few times), or the metadata request does not return topics that we requested // (which may also happen additionally consuming via regex), then we need to // bump errors for topics that were previously loaded, and bump errors for // topics awaiting load. // // This has two modes of operation: // // 1) if no topics were missing, then the metadata request failed outright, // and we need to bump errors on all stored topics and unknown topics. // // 2) if topics were missing, then the metadata request was successful but // had missing data, and we need to bump errors on only what was mising. // func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) { p := &cl.producer // mode 1 if len(missingTopics) == 0 { for _, topic := range requested { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } // mode 2 var missing map[string]bool for _, failTopic := range missingTopics { if missing == nil { missing = make(map[string]bool, len(missingTopics)) } missing[failTopic] = true if topic, exists := requested[failTopic]; exists { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() for topic, unknown := range p.unknownTopics { // if nil, mode 1, else mode 2 if missing != nil && !missing[topic] { continue } select { case unknown.wait <- err: default: } } } // topicPartitionsData is the data behind a topicPartitions' v. // // We keep this in an atomic because it is expected to be extremely read heavy, // and if it were behind a lock, the lock would need to be held for a while. type topicPartitionsData struct { // NOTE if adding anything to this struct, be sure to fix meta merge. loadErr error // could be auth, unknown, leader not avail, or creation err isInternal bool partitions []*topicPartition // partition num => partition writablePartitions []*topicPartition // subset of above } // topicPartition contains all information from Kafka for a topic's partition, // as well as what a client is producing to it or info about consuming from it. type topicPartition struct { // If we have a load error (leader/listener/replica not available), we // keep the old topicPartition data and the new error. loadErr error // If we do not have a load error, we determine if the new // topicPartition is the same or different from the old based on // whether the data changed (leader or leader epoch, etc.). 
topicPartitionData // If we do not have a load error, we copy the records and cursor // pointers from the old after updating any necessary fields in them // (see migrate functions below). // // Only one of records or cursor is non-nil. records *recBuf cursor *cursor } // Contains stuff that changes on metadata update that we copy into a cursor or // recBuf. type topicPartitionData struct { // Our leader; if metadata sees this change, the metadata update // migrates the cursor to a different source with the session stopped, // and the recBuf to a different sink under a tight mutex. leader int32 // What we believe to be the epoch of the leader for this partition. // // For cursors, for KIP-320, if a broker receives a fetch request where // the current leader epoch does not match the brokers, either the // broker is behind and returns UnknownLeaderEpoch, or we are behind // and the broker returns FencedLeaderEpoch. For the former, we back // off and retry. For the latter, we update our metadata. leaderEpoch int32 } // migrateProductionTo is called on metadata update if a topic partition's sink // has changed. This moves record production from one sink to the other; this // must be done such that records produced during migration follow those // already buffered. func (old *topicPartition) migrateProductionTo(new *topicPartition) { // First, remove our record buffer from the old sink. old.records.sink.removeRecBuf(old.records) // Before this next lock, record producing will buffer to the // in-migration-progress records and may trigger draining to // the old sink. That is fine, the old sink no longer consumes // from these records. We just have wasted drain triggers. old.records.mu.Lock() // guard setting sink and topic partition data old.records.sink = new.records.sink old.records.topicPartitionData = new.topicPartitionData old.records.mu.Unlock()
old.records.sink.addRecBuf(old.records) // add our record source to the new sink // At this point, the new sink will be draining our records. We lastly // need to copy the records pointer to our new topicPartition. new.records = old.records } // migrateCursorTo is called on metadata update if a topic partition's leader // or leader epoch has changed. // // This is a little bit different from above, in that we do this logic only // after stopping a consumer session. With the consumer session stopped, we // have fewer concurrency issues to worry about. func (old *topicPartition) migrateCursorTo( new *topicPartition, consumer *consumer, consumerSessionStopped *bool, reloadOffsets *listOrEpochLoads, tpsPrior **topicsPartitions, ) { // Migrating a cursor requires stopping any consumer session. If we // stop a session, we need to eventually re-start any offset listing or // epoch loading that was stopped. Thus, we simply merge what we // stopped into what we will reload. if !*consumerSessionStopped { loads, tps := consumer.stopSession() reloadOffsets.mergeFrom(loads) *tpsPrior = tps *consumerSessionStopped = true } old.cursor.source.removeCursor(old.cursor) // With the session stopped, we can update fields on the old cursor // with no concurrency issue. old.cursor.source = new.cursor.source // KIP-320: if we had consumed some messages, we need to validate the // leader epoch on the new broker to see if we experienced data loss // before we can use this cursor. if new.leaderEpoch != -1 && old.cursor.lastConsumedEpoch >= 0 { // Since the cursor consumed messages, it is definitely usable. // We use it so that the epoch load can finish using it // properly. old.cursor.use() reloadOffsets.addLoad(old.cursor.topic, old.cursor.partition, loadTypeEpoch, offsetLoad{ replica: -1, Offset: Offset{ at: old.cursor.offset, epoch: old.cursor.lastConsumedEpoch, }, }) } old.cursor.topicPartitionData = new.topicPartitionData old.cursor.source.addCursor(old.cursor) new.cursor = old.cursor }
// After the unlock above, record buffering can trigger drains // on the new sink, which is not yet consuming from these // records. Again, just more wasted drain triggers.
random_line_split
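The record above documents the copy-on-write pattern behind `topicsPartitions`: readers load a plain map from an `atomic.Value` with no locking, while writers clone the map, mutate the clone, and store it back (`clone`/`ensureTopics`/`storeData`). A minimal, self-contained sketch of that idea follows; the `cowMap` type and its method names are assumptions for illustration, not the franz-go API.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// cowMap is a hypothetical copy-on-write map: reads are lock-free via
// atomic.Value, and writers publish a fresh map value instead of mutating
// the one readers may be holding.
type cowMap struct {
	v atomic.Value // holds map[string]int
}

func newCowMap() *cowMap {
	m := &cowMap{}
	m.v.Store(make(map[string]int))
	return m
}

// load never blocks and is safe from any goroutine.
func (m *cowMap) load() map[string]int {
	return m.v.Load().(map[string]int)
}

// ensure clones the current map only if a key is missing, mirroring the
// ensureTopics shape: the caller can keep editing the returned clone and
// store it later in one atomic publish.
func (m *cowMap) ensure(keys ...string) map[string]int {
	cur := m.load()
	cloned := false
	for _, k := range keys {
		if _, ok := cur[k]; !ok {
			if !cloned {
				next := make(map[string]int, len(cur)+1)
				for k2, v := range cur {
					next[k2] = v
				}
				cur, cloned = next, true
			}
			cur[k] = 0
		}
	}
	return cur
}

func (m *cowMap) store(d map[string]int) { m.v.Store(d) }

func main() {
	m := newCowMap()
	m.store(m.ensure("foo", "bar"))
	fmt.Println(len(m.load())) // 2
}
```

The design choice is the same as in the record: the map is read extremely often and updated rarely, so paying a full copy on each (rare) write keeps every read path lock-free.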
topics_and_partitions.go
package kgo import ( "sync" "sync/atomic" "github.com/twmb/franz-go/pkg/kerr" ) func newTopicPartitions() *topicPartitions { parts := new(topicPartitions) parts.v.Store(new(topicPartitionsData)) return parts } // Contains all information about a topic's partitions. type topicPartitions struct { v atomic.Value // *topicPartitionsData partsMu sync.Mutex partitioner TopicPartitioner } func (t *topicPartitions) load() *topicPartitionsData { return t.v.Load().(*topicPartitionsData) } var noTopicsPartitions = newTopicsPartitions() func newTopicsPartitions() *topicsPartitions { var t topicsPartitions t.v.Store(make(topicsPartitionsData)) return &t } // A helper type mapping topics to their partitions; // this is the inner value of topicPartitions.v. type topicsPartitionsData map[string]*topicPartitions func (d topicsPartitionsData) hasTopic(t string) bool { _, exists := d[t]; return exists } func (d topicsPartitionsData) loadTopic(t string) *topicPartitionsData { tp, exists := d[t] if !exists { return nil } return tp.load() } // A helper type mapping topics to their partitions that can be updated // atomically. type topicsPartitions struct { v atomic.Value // topicsPartitionsData (map[string]*topicPartitions) } func (t *topicsPartitions) load() topicsPartitionsData { if t == nil { return nil } return t.v.Load().(topicsPartitionsData) } func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) } func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) } func (t *topicsPartitions) clone() topicsPartitionsData { current := t.load() clone := make(map[string]*topicPartitions, len(current)) for k, v := range current { clone[k] = v } return clone } // Ensures that the topics exist in the returned map, but does not store the // update. This can be used to update the data and store later, rather than // storing immediately. func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData { var cloned bool current := t.load() for _, topic := range topics { if _, exists := current[topic]; !exists { if !cloned { current = t.clone() cloned = true } current[topic] = newTopicPartitions() } } return current } // Updates the topic partitions data atomic value. // // If this is the first time seeing partitions, we do processing of unknown // partitions that may be buffered for producing. func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) { // If the topic already had partitions, then there would be no // unknown topic waiting and we do not need to notify anything. if hadPartitions { l.v.Store(lv) return } p := &cl.producer p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() // If the topic did not have partitions, then we need to store the // partition update BEFORE unlocking the mutex to guard against this // sequence of events: // // - unlock waiters // - delete waiter // - new produce recreates waiter // - we store update // - we never notify the recreated waiter // // By storing before releasing the locks, we ensure that later // partition loads for this topic under the mu will see our update. defer l.v.Store(lv) // If there are no unknown topics or this topic is not unknown, then we // have nothing to do. if len(p.unknownTopics) == 0 { return } unknown, exists := p.unknownTopics[topic] if !exists { return } // If we loaded no partitions because of a retriable error, we signal // the waiting goroutine that a try happened. 
It is possible the // goroutine is quitting and will not be draining unknownWait, so we do // not require the send. if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) { select { case unknown.wait <- lv.loadErr: default: } return } // Either we have a fatal error or we can successfully partition. // // Even with a fatal error, if we loaded any partitions, we partition. // If we only had a fatal error, we can finish promises in a goroutine. // If we are partitioning, we have to do it under the unknownMu to // ensure prior buffered records are produced in order before we // release the mu. delete(p.unknownTopics, topic) close(unknown.wait) // allow waiting goroutine to quit if len(lv.partitions) == 0 { cl.failUnknownTopicRecords(topic, unknown, lv.loadErr) } else { for _, pr := range unknown.buffered { cl.doPartitionRecord(l, lv, pr) } } } // If a metadata request fails after retrying (internally retrying, so only a // few times), or the metadata request does not return topics that we requested // (which may also happen additionally consuming via regex), then we need to // bump errors for topics that were previously loaded, and bump errors for // topics awaiting load. // // This has two modes of operation: // // 1) if no topics were missing, then the metadata request failed outright, // and we need to bump errors on all stored topics and unknown topics. // // 2) if topics were missing, then the metadata request was successful but // had missing data, and we need to bump errors on only what was mising. // func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) { p := &cl.producer // mode 1 if len(missingTopics) == 0 { for _, topic := range requested { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } // mode 2 var missing map[string]bool for _, failTopic := range missingTopics { if missing == nil { missing = make(map[string]bool, len(missingTopics)) } missing[failTopic] = true if topic, exists := requested[failTopic]; exists { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() for topic, unknown := range p.unknownTopics { // if nil, mode 1, else mode 2 if missing != nil && !missing[topic] { continue } select { case unknown.wait <- err: default: } } } // topicPartitionsData is the data behind a topicPartitions' v. // // We keep this in an atomic because it is expected to be extremely read heavy, // and if it were behind a lock, the lock would need to be held for a while. type topicPartitionsData struct { // NOTE if adding anything to this struct, be sure to fix meta merge. loadErr error // could be auth, unknown, leader not avail, or creation err isInternal bool partitions []*topicPartition // partition num => partition writablePartitions []*topicPartition // subset of above } // topicPartition contains all information from Kafka for a topic's partition, // as well as what a client is producing to it or info about consuming from it. type topicPartition struct { // If we have a load error (leader/listener/replica not available), we // keep the old topicPartition data and the new error. loadErr error // If we do not have a load error, we determine if the new // topicPartition is the same or different from the old based on // whether the data changed (leader or leader epoch, etc.). 
topicPartitionData // If we do not have a load error, we copy the records and cursor // pointers from the old after updating any necessary fields in them // (see migrate functions below). // // Only one of records or cursor is non-nil. records *recBuf cursor *cursor } // Contains stuff that changes on metadata update that we copy into a cursor or // recBuf. type topicPartitionData struct { // Our leader; if metadata sees this change, the metadata update // migrates the cursor to a different source with the session stopped, // and the recBuf to a different sink under a tight mutex. leader int32 // What we believe to be the epoch of the leader for this partition. // // For cursors, for KIP-320, if a broker receives a fetch request where // the current leader epoch does not match the brokers, either the // broker is behind and returns UnknownLeaderEpoch, or we are behind // and the broker returns FencedLeaderEpoch. For the former, we back // off and retry. For the latter, we update our metadata. leaderEpoch int32 } // migrateProductionTo is called on metadata update if a topic partition's sink // has changed. This moves record production from one sink to the other; this // must be done such that records produced during migration follow those // already buffered. func (old *topicPartition)
(new *topicPartition) { // First, remove our record buffer from the old sink. old.records.sink.removeRecBuf(old.records) // Before this next lock, record producing will buffer to the // in-migration-progress records and may trigger draining to // the old sink. That is fine, the old sink no longer consumes // from these records. We just have wasted drain triggers. old.records.mu.Lock() // guard setting sink and topic partition data old.records.sink = new.records.sink old.records.topicPartitionData = new.topicPartitionData old.records.mu.Unlock() // After the unlock above, record buffering can trigger drains // on the new sink, which is not yet consuming from these // records. Again, just more wasted drain triggers. old.records.sink.addRecBuf(old.records) // add our record source to the new sink // At this point, the new sink will be draining our records. We lastly // need to copy the records pointer to our new topicPartition. new.records = old.records } // migrateCursorTo is called on metadata update if a topic partition's leader // or leader epoch has changed. // // This is a little bit different from above, in that we do this logic only // after stopping a consumer session. With the consumer session stopped, we // have fewer concurrency issues to worry about. func (old *topicPartition) migrateCursorTo( new *topicPartition, consumer *consumer, consumerSessionStopped *bool, reloadOffsets *listOrEpochLoads, tpsPrior **topicsPartitions, ) { // Migrating a cursor requires stopping any consumer session. If we // stop a session, we need to eventually re-start any offset listing or // epoch loading that was stopped. Thus, we simply merge what we // stopped into what we will reload. if !*consumerSessionStopped { loads, tps := consumer.stopSession() reloadOffsets.mergeFrom(loads) *tpsPrior = tps *consumerSessionStopped = true } old.cursor.source.removeCursor(old.cursor) // With the session stopped, we can update fields on the old cursor // with no concurrency issue. old.cursor.source = new.cursor.source // KIP-320: if we had consumed some messages, we need to validate the // leader epoch on the new broker to see if we experienced data loss // before we can use this cursor. if new.leaderEpoch != -1 && old.cursor.lastConsumedEpoch >= 0 { // Since the cursor consumed messages, it is definitely usable. // We use it so that the epoch load can finish using it // properly. old.cursor.use() reloadOffsets.addLoad(old.cursor.topic, old.cursor.partition, loadTypeEpoch, offsetLoad{ replica: -1, Offset: Offset{ at: old.cursor.offset, epoch: old.cursor.lastConsumedEpoch, }, }) } old.cursor.topicPartitionData = new.topicPartitionData old.cursor.source.addCursor(old.cursor) new.cursor = old.cursor }
migrateProductionTo
identifier_name
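The `migrateProductionTo` comments in the record above describe a specific ordering: detach the record buffer from the old sink, swap its destination under the buffer's own mutex, then attach it to the new sink, so anything buffered mid-migration can only ever be drained by the new sink. Below is a minimal sketch of that ordering with hypothetical `sink`/`recBuf` types; it is not the franz-go implementation, and the sink attach/detach bookkeeping is elided.

```go
package main

import (
	"fmt"
	"sync"
)

type sink struct{ name string }

// recBuf is a hypothetical record buffer whose destination sink can change.
type recBuf struct {
	mu   sync.Mutex
	sink *sink
	recs []string
}

func (b *recBuf) buffer(r string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.recs = append(b.recs, r)
}

// migrate detaches first and attaches last: the old sink may see a few
// wasted drain triggers for records buffered during the swap, but it never
// drains records that belong to the new destination.
func (b *recBuf) migrate(next *sink) {
	// 1. remove the buffer from the old sink (bookkeeping omitted)
	b.mu.Lock()
	b.sink = next // 2. swap the destination under the buffer's own lock
	b.mu.Unlock()
	// 3. add the buffer to the new sink (bookkeeping omitted)
}

func main() {
	b := &recBuf{sink: &sink{name: "old"}}
	b.buffer("r1")
	b.migrate(&sink{name: "new"})
	b.buffer("r2")
	fmt.Println(b.sink.name, b.recs) // new [r1 r2]
}
```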
topics_and_partitions.go
package kgo import ( "sync" "sync/atomic" "github.com/twmb/franz-go/pkg/kerr" ) func newTopicPartitions() *topicPartitions { parts := new(topicPartitions) parts.v.Store(new(topicPartitionsData)) return parts } // Contains all information about a topic's partitions. type topicPartitions struct { v atomic.Value // *topicPartitionsData partsMu sync.Mutex partitioner TopicPartitioner } func (t *topicPartitions) load() *topicPartitionsData { return t.v.Load().(*topicPartitionsData) } var noTopicsPartitions = newTopicsPartitions() func newTopicsPartitions() *topicsPartitions { var t topicsPartitions t.v.Store(make(topicsPartitionsData)) return &t } // A helper type mapping topics to their partitions; // this is the inner value of topicPartitions.v. type topicsPartitionsData map[string]*topicPartitions func (d topicsPartitionsData) hasTopic(t string) bool { _, exists := d[t]; return exists } func (d topicsPartitionsData) loadTopic(t string) *topicPartitionsData { tp, exists := d[t] if !exists { return nil } return tp.load() } // A helper type mapping topics to their partitions that can be updated // atomically. type topicsPartitions struct { v atomic.Value // topicsPartitionsData (map[string]*topicPartitions) } func (t *topicsPartitions) load() topicsPartitionsData { if t == nil { return nil } return t.v.Load().(topicsPartitionsData) } func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) } func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) } func (t *topicsPartitions) clone() topicsPartitionsData { current := t.load() clone := make(map[string]*topicPartitions, len(current)) for k, v := range current { clone[k] = v } return clone } // Ensures that the topics exist in the returned map, but does not store the // update. This can be used to update the data and store later, rather than // storing immediately. func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData { var cloned bool current := t.load() for _, topic := range topics { if _, exists := current[topic]; !exists { if !cloned { current = t.clone() cloned = true } current[topic] = newTopicPartitions() } } return current } // Updates the topic partitions data atomic value. // // If this is the first time seeing partitions, we do processing of unknown // partitions that may be buffered for producing. func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) { // If the topic already had partitions, then there would be no // unknown topic waiting and we do not need to notify anything. if hadPartitions
p := &cl.producer p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() // If the topic did not have partitions, then we need to store the // partition update BEFORE unlocking the mutex to guard against this // sequence of events: // // - unlock waiters // - delete waiter // - new produce recreates waiter // - we store update // - we never notify the recreated waiter // // By storing before releasing the locks, we ensure that later // partition loads for this topic under the mu will see our update. defer l.v.Store(lv) // If there are no unknown topics or this topic is not unknown, then we // have nothing to do. if len(p.unknownTopics) == 0 { return } unknown, exists := p.unknownTopics[topic] if !exists { return } // If we loaded no partitions because of a retriable error, we signal // the waiting goroutine that a try happened. It is possible the // goroutine is quitting and will not be draining unknownWait, so we do // not require the send. if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) { select { case unknown.wait <- lv.loadErr: default: } return } // Either we have a fatal error or we can successfully partition. // // Even with a fatal error, if we loaded any partitions, we partition. // If we only had a fatal error, we can finish promises in a goroutine. // If we are partitioning, we have to do it under the unknownMu to // ensure prior buffered records are produced in order before we // release the mu. delete(p.unknownTopics, topic) close(unknown.wait) // allow waiting goroutine to quit if len(lv.partitions) == 0 { cl.failUnknownTopicRecords(topic, unknown, lv.loadErr) } else { for _, pr := range unknown.buffered { cl.doPartitionRecord(l, lv, pr) } } } // If a metadata request fails after retrying (internally retrying, so only a // few times), or the metadata request does not return topics that we requested // (which may also happen additionally consuming via regex), then we need to // bump errors for topics that were previously loaded, and bump errors for // topics awaiting load. // // This has two modes of operation: // // 1) if no topics were missing, then the metadata request failed outright, // and we need to bump errors on all stored topics and unknown topics. // // 2) if topics were missing, then the metadata request was successful but // had missing data, and we need to bump errors on only what was mising. // func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) { p := &cl.producer // mode 1 if len(missingTopics) == 0 { for _, topic := range requested { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } // mode 2 var missing map[string]bool for _, failTopic := range missingTopics { if missing == nil { missing = make(map[string]bool, len(missingTopics)) } missing[failTopic] = true if topic, exists := requested[failTopic]; exists { for _, topicPartition := range topic.load().partitions { topicPartition.records.bumpRepeatedLoadErr(err) } } } p.unknownTopicsMu.Lock() defer p.unknownTopicsMu.Unlock() for topic, unknown := range p.unknownTopics { // if nil, mode 1, else mode 2 if missing != nil && !missing[topic] { continue } select { case unknown.wait <- err: default: } } } // topicPartitionsData is the data behind a topicPartitions' v. // // We keep this in an atomic because it is expected to be extremely read heavy, // and if it were behind a lock, the lock would need to be held for a while. 
type topicPartitionsData struct { // NOTE if adding anything to this struct, be sure to fix meta merge. loadErr error // could be auth, unknown, leader not avail, or creation err isInternal bool partitions []*topicPartition // partition num => partition writablePartitions []*topicPartition // subset of above } // topicPartition contains all information from Kafka for a topic's partition, // as well as what a client is producing to it or info about consuming from it. type topicPartition struct { // If we have a load error (leader/listener/replica not available), we // keep the old topicPartition data and the new error. loadErr error // If we do not have a load error, we determine if the new // topicPartition is the same or different from the old based on // whether the data changed (leader or leader epoch, etc.). topicPartitionData // If we do not have a load error, we copy the records and cursor // pointers from the old after updating any necessary fields in them // (see migrate functions below). // // Only one of records or cursor is non-nil. records *recBuf cursor *cursor } // Contains stuff that changes on metadata update that we copy into a cursor or // recBuf. type topicPartitionData struct { // Our leader; if metadata sees this change, the metadata update // migrates the cursor to a different source with the session stopped, // and the recBuf to a different sink under a tight mutex. leader int32 // What we believe to be the epoch of the leader for this partition. // // For cursors, for KIP-320, if a broker receives a fetch request where // the current leader epoch does not match the brokers, either the // broker is behind and returns UnknownLeaderEpoch, or we are behind // and the broker returns FencedLeaderEpoch. For the former, we back // off and retry. For the latter, we update our metadata. leaderEpoch int32 } // migrateProductionTo is called on metadata update if a topic partition's sink // has changed. This moves record production from one sink to the other; this // must be done such that records produced during migration follow those // already buffered. func (old *topicPartition) migrateProductionTo(new *topicPartition) { // First, remove our record buffer from the old sink. old.records.sink.removeRecBuf(old.records) // Before this next lock, record producing will buffer to the // in-migration-progress records and may trigger draining to // the old sink. That is fine, the old sink no longer consumes // from these records. We just have wasted drain triggers. old.records.mu.Lock() // guard setting sink and topic partition data old.records.sink = new.records.sink old.records.topicPartitionData = new.topicPartitionData old.records.mu.Unlock() // After the unlock above, record buffering can trigger drains // on the new sink, which is not yet consuming from these // records. Again, just more wasted drain triggers. old.records.sink.addRecBuf(old.records) // add our record source to the new sink // At this point, the new sink will be draining our records. We lastly // need to copy the records pointer to our new topicPartition. new.records = old.records } // migrateCursorTo is called on metadata update if a topic partition's leader // or leader epoch has changed. // // This is a little bit different from above, in that we do this logic only // after stopping a consumer session. With the consumer session stopped, we // have fewer concurrency issues to worry about. 
func (old *topicPartition) migrateCursorTo( new *topicPartition, consumer *consumer, consumerSessionStopped *bool, reloadOffsets *listOrEpochLoads, tpsPrior **topicsPartitions, ) { // Migrating a cursor requires stopping any consumer session. If we // stop a session, we need to eventually re-start any offset listing or // epoch loading that was stopped. Thus, we simply merge what we // stopped into what we will reload. if !*consumerSessionStopped { loads, tps := consumer.stopSession() reloadOffsets.mergeFrom(loads) *tpsPrior = tps *consumerSessionStopped = true } old.cursor.source.removeCursor(old.cursor) // With the session stopped, we can update fields on the old cursor // with no concurrency issue. old.cursor.source = new.cursor.source // KIP-320: if we had consumed some messages, we need to validate the // leader epoch on the new broker to see if we experienced data loss // before we can use this cursor. if new.leaderEpoch != -1 && old.cursor.lastConsumedEpoch >= 0 { // Since the cursor consumed messages, it is definitely usable. // We use it so that the epoch load can finish using it // properly. old.cursor.use() reloadOffsets.addLoad(old.cursor.topic, old.cursor.partition, loadTypeEpoch, offsetLoad{ replica: -1, Offset: Offset{ at: old.cursor.offset, epoch: old.cursor.lastConsumedEpoch, }, }) } old.cursor.topicPartitionData = new.topicPartitionData old.cursor.source.addCursor(old.cursor) new.cursor = old.cursor }
{ l.v.Store(lv) return }
conditional_block
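`storePartitionsUpdate` in the records above signals a possibly-departed waiter with a non-blocking send: `select { case unknown.wait <- err: default: }`, because the waiting goroutine may already be quitting and no longer draining the channel. Here is a tiny runnable illustration of that idiom; the `notify` helper and channel size are assumptions for the example only.

```go
package main

import (
	"errors"
	"fmt"
)

// notify attempts to hand err to a waiter without ever blocking the caller.
// If nobody is draining the channel (or its buffer is full), the signal is
// simply dropped, which is exactly the behavior the metadata code relies on.
func notify(wait chan error, err error) bool {
	select {
	case wait <- err:
		return true
	default:
		return false
	}
}

func main() {
	wait := make(chan error, 1)
	fmt.Println(notify(wait, errors.New("retriable load error"))) // true: buffered for the waiter
	fmt.Println(notify(wait, errors.New("second error")))         // false: buffer full, dropped
}
```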
lib.rs
//! Pretty printing for Javascript values from [wasm-bindgen](https://docs.rs/wasm-bindgen). #![forbid(unsafe_code)] use js_sys::{ Array, Date, Error, Function, JsString, Map, Object, Promise, Reflect, RegExp, Set, Symbol, WeakSet, }; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, fmt::{Debug, Display, Formatter, Result as FmtResult}, rc::Rc, }; use wasm_bindgen::{JsCast, JsValue}; use web_sys::{Document, Element, Window}; pub trait Pretty { fn pretty(&self) -> Prettified; } impl<T> Pretty for T where T: AsRef<JsValue>, { fn pretty(&self) -> Prettified { Prettified { value: self.as_ref().to_owned(), seen: WeakSet::new(), skip: Default::default(), } } } /// A pretty-printable value from Javascript. pub struct Prettified { /// The current value we're visiting. value: JsValue, /// We just use a JS array here to avoid relying on wasm-bindgen's unstable /// ABI. seen: WeakSet, /// Properties we don't want serialized. skip: Rc<HashSet<String>>, } impl Prettified { /// Skip printing the property with `name` if it exists on any object /// visited (transitively). pub fn skip_property(&mut self, name: &str) -> &mut Self { let mut with_name = HashSet::to_owned(&self.skip); with_name.insert(name.to_owned()); self.skip = Rc::new(with_name); self } fn child(&self, v: &JsValue) -> Self { Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() } } // TODO get a serde_json::Value from this too } impl Debug for Prettified { fn fmt(&self, f: &mut Formatter) -> FmtResult { // detect and break cycles before trying to figure out Object subclass // keeps a single path here rather than separately in each branch below let mut _reset = None; if let Some(obj) = self.value.dyn_ref::<Object>() { if self.seen.has(obj) { return write!(f, "[Cycle]"); } self.seen.add(obj); _reset = Some(scopeguard::guard(obj.to_owned(), |obj| { self.seen.delete(&obj); })); } if self.value.is_null() { write!(f, "null") } else if self.value.is_undefined() { write!(f, "undefined") } else if self.value.dyn_ref::<Function>().is_some() { JsFunction.fmt(f) } else if self.value.dyn_ref::<Promise>().is_some() { write!(f, "[Promise]") } else if self.value.dyn_ref::<Document>().is_some() { write!(f, "[Document]") } else if self.value.dyn_ref::<Window>().is_some() { write!(f, "[Window]") } else if let Some(s) = self.value.dyn_ref::<JsString>() { write!(f, "{:?}", s.as_string().unwrap()) } else if let Some(n) = self.value.as_f64() { write!(f, "{}", n) } else if let Some(b) = self.value.as_bool() { write!(f, "{:?}", b) } else if let Some(d) = self.value.dyn_ref::<Date>() { write!(f, "{}", d.to_iso_string().as_string().unwrap()) } else if let Some(d) = self.value.dyn_ref::<Element>() { let name = d.tag_name().to_ascii_lowercase(); let (mut class, mut id) = (d.class_name(), d.id()); if !class.is_empty() { class.insert_str(0, " ."); } if !id.is_empty() { id.insert_str(0, " #"); } write!(f, "<{}{}{}/>", name, id, class) } else if let Some(e) = self.value.dyn_ref::<Error>() { write!(f, "Error: {}", e.to_string().as_string().unwrap()) } else if let Some(r) = self.value.dyn_ref::<RegExp>() { write!(f, "/{}/", r.to_string().as_string().unwrap()) } else if let Some(s) = self.value.dyn_ref::<Symbol>() { write!(f, "{}", s.to_string().as_string().unwrap()) } else if let Some(a) = self.value.dyn_ref::<Array>() { let mut f = f.debug_list(); for val in a.iter() { f.entry(&self.child(&val)); } f.finish() } else if let Some(s) = self.value.dyn_ref::<Set>() { let mut f = f.debug_set(); let entries = s.entries(); while let Ok(next) = 
entries.next() { if next.done() { break; } f.entry(&self.child(&next.value())); } f.finish() } else if let Some(m) = self.value.dyn_ref::<Map>() { let mut f = f.debug_map(); let keys = m.keys(); while let Ok(next) = keys.next() { if next.done() { break; } let key = next.value(); let value = m.get(&key); f.entry(&self.child(&key), &self.child(&value)); } f.finish() } else if let Some(obj) = self.value.dyn_ref::<Object>() { let mut proto = obj.clone(); let mut props_seen = HashSet::new(); let name = obj.constructor().name().as_string().unwrap(); let mut f = f.debug_struct(&name); loop { let mut functions = BTreeSet::new(); let mut props = BTreeMap::new(); for raw_key in Object::get_own_property_names(&proto).iter() { let key = raw_key.as_string().expect("object keys are always strings"); if (key.starts_with("__") && key.ends_with("__")) || props_seen.contains(&key) || functions.contains(&key) || self.skip.contains(&key) { continue; } if let Ok(value) = Reflect::get(obj, &raw_key) { props_seen.insert(key.clone()); if value.is_function() { functions.insert(key); } else { props.insert(key, self.child(&value)); } } } for (key, value) in props { f.field(&key, &value); } for key in functions { f.field(&key, &JsFunction); } proto = Object::get_prototype_of(proto.as_ref()); if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" { // we've reached the end of the prototype chain break; } } f.finish() } else { write!(f, "unknown ({:?})", &self.value) } } } impl Display for Prettified { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{:#?}", self) } } struct
; impl Debug for JsFunction { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "[Function]") } } #[cfg(test)] mod tests { use super::*; use futures::channel::oneshot::channel; use wasm_bindgen::closure::Closure; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use web_sys::{Event, EventTarget}; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn cycle_is_broken() { let with_cycles = js_sys::Function::new_no_args( r#" let root = { child: { nested: [] } }; root.child.nested.push(root); return root; "#, ) .call0(&JsValue::null()) .unwrap(); assert_eq!( with_cycles.pretty().to_string(), r#"Object { child: Object { nested: [ [Cycle], ], }, }"# ); } #[wasm_bindgen_test] fn repeated_siblings_are_not_cycles() { let with_siblings = js_sys::Function::new_no_args( r#" let root = { child: { nested: [] } }; let repeated_child = { foo: "bar" }; root.child.nested.push(repeated_child); root.child.nested.push(repeated_child); return root; "#, ) .call0(&JsValue::null()) .unwrap(); assert_eq!( with_siblings.pretty().to_string(), r#"Object { child: Object { nested: [ Object { foo: "bar", }, Object { foo: "bar", }, ], }, }"# ); } #[wasm_bindgen_test] async fn live_keyboard_event() { // create an input element and bind it to the document let window = web_sys::window().unwrap(); let document = window.document().unwrap(); let input = document.create_element("input").unwrap(); // input.set_attribute("type", "text").unwrap(); document.body().unwrap().append_child(input.as_ref()).unwrap(); // create & add an event listener that will send the event back the test let (send, recv) = channel(); let callback = Closure::once_into_js(move |ev: Event| { send.send(ev).unwrap(); }); let target: &EventTarget = input.as_ref(); let event_type = "keydown"; target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap(); // create & dispatch an event to the input element let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict( event_type, web_sys::KeyboardEventInit::new() .char_code(b'F' as u32) .bubbles(true) .cancelable(true) .view(Some(&window)), ) .unwrap(); let sent: &Event = sent_event.as_ref(); assert!(target.dispatch_event(sent).unwrap()); // wait for the event to come back let received_event: Event = recv.await.unwrap(); // make sure we can print it without exploding due to nesting assert_eq!( received_event.pretty().skip_property("timeStamp").to_string(), r#"KeyboardEvent { isTrusted: false, DOM_KEY_LOCATION_LEFT: 1, DOM_KEY_LOCATION_NUMPAD: 3, DOM_KEY_LOCATION_RIGHT: 2, DOM_KEY_LOCATION_STANDARD: 0, altKey: false, charCode: 70, code: "", ctrlKey: false, isComposing: false, key: "", keyCode: 0, location: 0, metaKey: false, repeat: false, shiftKey: false, constructor: [Function], getModifierState: [Function], initKeyboardEvent: [Function], detail: 0, sourceCapabilities: null, view: [Window], which: 0, initUIEvent: [Function], AT_TARGET: 2, BUBBLING_PHASE: 3, CAPTURING_PHASE: 1, NONE: 0, bubbles: true, cancelBubble: false, cancelable: true, composed: false, currentTarget: null, defaultPrevented: false, eventPhase: 0, path: [ <input/>, <body/>, <html/>, [Document], [Window], ], returnValue: true, srcElement: <input/>, target: <input/>, type: "keydown", composedPath: [Function], initEvent: [Function], preventDefault: [Function], stopImmediatePropagation: [Function], stopPropagation: [Function], }"#, ); } }
JsFunction
identifier_name
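The lib.rs record above breaks reference cycles while pretty-printing by keeping a set of objects currently being formatted (`WeakSet` plus a scopeguard that removes the object on the way back out), printing `[Cycle]` when a value recurs but still allowing repeated siblings. The following Go sketch shows the same traversal idea with a plain visited set; the `node`/`render` names are illustrative only and unrelated to the Rust crate.

```go
package main

import "fmt"

type node struct {
	name     string
	children []*node
}

// render walks the graph depth-first. A node already on the current path is
// reported as "[Cycle]"; the deferred delete plays the scopeguard's role, so
// the same node may still appear again as a sibling elsewhere in the output.
func render(n *node, seen map[*node]bool) string {
	if seen[n] {
		return "[Cycle]"
	}
	seen[n] = true
	defer delete(seen, n)
	out := n.name + "("
	for i, c := range n.children {
		if i > 0 {
			out += ", "
		}
		out += render(c, seen)
	}
	return out + ")"
}

func main() {
	root := &node{name: "root"}
	child := &node{name: "child"}
	root.children = append(root.children, child)
	child.children = append(child.children, root) // introduce a cycle
	fmt.Println(render(root, map[*node]bool{}))   // root(child([Cycle]))
}
```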
lib.rs
//! Pretty printing for Javascript values from [wasm-bindgen](https://docs.rs/wasm-bindgen). #![forbid(unsafe_code)] use js_sys::{ Array, Date, Error, Function, JsString, Map, Object, Promise, Reflect, RegExp, Set, Symbol, WeakSet, }; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, fmt::{Debug, Display, Formatter, Result as FmtResult}, rc::Rc, }; use wasm_bindgen::{JsCast, JsValue}; use web_sys::{Document, Element, Window}; pub trait Pretty { fn pretty(&self) -> Prettified; }
where T: AsRef<JsValue>, { fn pretty(&self) -> Prettified { Prettified { value: self.as_ref().to_owned(), seen: WeakSet::new(), skip: Default::default(), } } } /// A pretty-printable value from Javascript. pub struct Prettified { /// The current value we're visiting. value: JsValue, /// We just use a JS array here to avoid relying on wasm-bindgen's unstable /// ABI. seen: WeakSet, /// Properties we don't want serialized. skip: Rc<HashSet<String>>, } impl Prettified { /// Skip printing the property with `name` if it exists on any object /// visited (transitively). pub fn skip_property(&mut self, name: &str) -> &mut Self { let mut with_name = HashSet::to_owned(&self.skip); with_name.insert(name.to_owned()); self.skip = Rc::new(with_name); self } fn child(&self, v: &JsValue) -> Self { Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() } } // TODO get a serde_json::Value from this too } impl Debug for Prettified { fn fmt(&self, f: &mut Formatter) -> FmtResult { // detect and break cycles before trying to figure out Object subclass // keeps a single path here rather than separately in each branch below let mut _reset = None; if let Some(obj) = self.value.dyn_ref::<Object>() { if self.seen.has(obj) { return write!(f, "[Cycle]"); } self.seen.add(obj); _reset = Some(scopeguard::guard(obj.to_owned(), |obj| { self.seen.delete(&obj); })); } if self.value.is_null() { write!(f, "null") } else if self.value.is_undefined() { write!(f, "undefined") } else if self.value.dyn_ref::<Function>().is_some() { JsFunction.fmt(f) } else if self.value.dyn_ref::<Promise>().is_some() { write!(f, "[Promise]") } else if self.value.dyn_ref::<Document>().is_some() { write!(f, "[Document]") } else if self.value.dyn_ref::<Window>().is_some() { write!(f, "[Window]") } else if let Some(s) = self.value.dyn_ref::<JsString>() { write!(f, "{:?}", s.as_string().unwrap()) } else if let Some(n) = self.value.as_f64() { write!(f, "{}", n) } else if let Some(b) = self.value.as_bool() { write!(f, "{:?}", b) } else if let Some(d) = self.value.dyn_ref::<Date>() { write!(f, "{}", d.to_iso_string().as_string().unwrap()) } else if let Some(d) = self.value.dyn_ref::<Element>() { let name = d.tag_name().to_ascii_lowercase(); let (mut class, mut id) = (d.class_name(), d.id()); if !class.is_empty() { class.insert_str(0, " ."); } if !id.is_empty() { id.insert_str(0, " #"); } write!(f, "<{}{}{}/>", name, id, class) } else if let Some(e) = self.value.dyn_ref::<Error>() { write!(f, "Error: {}", e.to_string().as_string().unwrap()) } else if let Some(r) = self.value.dyn_ref::<RegExp>() { write!(f, "/{}/", r.to_string().as_string().unwrap()) } else if let Some(s) = self.value.dyn_ref::<Symbol>() { write!(f, "{}", s.to_string().as_string().unwrap()) } else if let Some(a) = self.value.dyn_ref::<Array>() { let mut f = f.debug_list(); for val in a.iter() { f.entry(&self.child(&val)); } f.finish() } else if let Some(s) = self.value.dyn_ref::<Set>() { let mut f = f.debug_set(); let entries = s.entries(); while let Ok(next) = entries.next() { if next.done() { break; } f.entry(&self.child(&next.value())); } f.finish() } else if let Some(m) = self.value.dyn_ref::<Map>() { let mut f = f.debug_map(); let keys = m.keys(); while let Ok(next) = keys.next() { if next.done() { break; } let key = next.value(); let value = m.get(&key); f.entry(&self.child(&key), &self.child(&value)); } f.finish() } else if let Some(obj) = self.value.dyn_ref::<Object>() { let mut proto = obj.clone(); let mut props_seen = HashSet::new(); let name = 
obj.constructor().name().as_string().unwrap(); let mut f = f.debug_struct(&name); loop { let mut functions = BTreeSet::new(); let mut props = BTreeMap::new(); for raw_key in Object::get_own_property_names(&proto).iter() { let key = raw_key.as_string().expect("object keys are always strings"); if (key.starts_with("__") && key.ends_with("__")) || props_seen.contains(&key) || functions.contains(&key) || self.skip.contains(&key) { continue; } if let Ok(value) = Reflect::get(obj, &raw_key) { props_seen.insert(key.clone()); if value.is_function() { functions.insert(key); } else { props.insert(key, self.child(&value)); } } } for (key, value) in props { f.field(&key, &value); } for key in functions { f.field(&key, &JsFunction); } proto = Object::get_prototype_of(proto.as_ref()); if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" { // we've reached the end of the prototype chain break; } } f.finish() } else { write!(f, "unknown ({:?})", &self.value) } } } impl Display for Prettified { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{:#?}", self) } } struct JsFunction; impl Debug for JsFunction { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "[Function]") } } #[cfg(test)] mod tests { use super::*; use futures::channel::oneshot::channel; use wasm_bindgen::closure::Closure; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use web_sys::{Event, EventTarget}; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn cycle_is_broken() { let with_cycles = js_sys::Function::new_no_args( r#" let root = { child: { nested: [] } }; root.child.nested.push(root); return root; "#, ) .call0(&JsValue::null()) .unwrap(); assert_eq!( with_cycles.pretty().to_string(), r#"Object { child: Object { nested: [ [Cycle], ], }, }"# ); } #[wasm_bindgen_test] fn repeated_siblings_are_not_cycles() { let with_siblings = js_sys::Function::new_no_args( r#" let root = { child: { nested: [] } }; let repeated_child = { foo: "bar" }; root.child.nested.push(repeated_child); root.child.nested.push(repeated_child); return root; "#, ) .call0(&JsValue::null()) .unwrap(); assert_eq!( with_siblings.pretty().to_string(), r#"Object { child: Object { nested: [ Object { foo: "bar", }, Object { foo: "bar", }, ], }, }"# ); } #[wasm_bindgen_test] async fn live_keyboard_event() { // create an input element and bind it to the document let window = web_sys::window().unwrap(); let document = window.document().unwrap(); let input = document.create_element("input").unwrap(); // input.set_attribute("type", "text").unwrap(); document.body().unwrap().append_child(input.as_ref()).unwrap(); // create & add an event listener that will send the event back the test let (send, recv) = channel(); let callback = Closure::once_into_js(move |ev: Event| { send.send(ev).unwrap(); }); let target: &EventTarget = input.as_ref(); let event_type = "keydown"; target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap(); // create & dispatch an event to the input element let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict( event_type, web_sys::KeyboardEventInit::new() .char_code(b'F' as u32) .bubbles(true) .cancelable(true) .view(Some(&window)), ) .unwrap(); let sent: &Event = sent_event.as_ref(); assert!(target.dispatch_event(sent).unwrap()); // wait for the event to come back let received_event: Event = recv.await.unwrap(); // make sure we can print it without exploding due to nesting assert_eq!( 
received_event.pretty().skip_property("timeStamp").to_string(), r#"KeyboardEvent { isTrusted: false, DOM_KEY_LOCATION_LEFT: 1, DOM_KEY_LOCATION_NUMPAD: 3, DOM_KEY_LOCATION_RIGHT: 2, DOM_KEY_LOCATION_STANDARD: 0, altKey: false, charCode: 70, code: "", ctrlKey: false, isComposing: false, key: "", keyCode: 0, location: 0, metaKey: false, repeat: false, shiftKey: false, constructor: [Function], getModifierState: [Function], initKeyboardEvent: [Function], detail: 0, sourceCapabilities: null, view: [Window], which: 0, initUIEvent: [Function], AT_TARGET: 2, BUBBLING_PHASE: 3, CAPTURING_PHASE: 1, NONE: 0, bubbles: true, cancelBubble: false, cancelable: true, composed: false, currentTarget: null, defaultPrevented: false, eventPhase: 0, path: [ <input/>, <body/>, <html/>, [Document], [Window], ], returnValue: true, srcElement: <input/>, target: <input/>, type: "keydown", composedPath: [Function], initEvent: [Function], preventDefault: [Function], stopImmediatePropagation: [Function], stopPropagation: [Function], }"#, ); } }
impl<T> Pretty for T
random_line_split
lib.rs
//! Pretty printing for Javascript values from [wasm-bindgen](https://docs.rs/wasm-bindgen). #![forbid(unsafe_code)] use js_sys::{ Array, Date, Error, Function, JsString, Map, Object, Promise, Reflect, RegExp, Set, Symbol, WeakSet, }; use std::{ collections::{BTreeMap, BTreeSet, HashSet}, fmt::{Debug, Display, Formatter, Result as FmtResult}, rc::Rc, }; use wasm_bindgen::{JsCast, JsValue}; use web_sys::{Document, Element, Window}; pub trait Pretty { fn pretty(&self) -> Prettified; } impl<T> Pretty for T where T: AsRef<JsValue>, { fn pretty(&self) -> Prettified { Prettified { value: self.as_ref().to_owned(), seen: WeakSet::new(), skip: Default::default(), } } } /// A pretty-printable value from Javascript. pub struct Prettified { /// The current value we're visiting. value: JsValue, /// We just use a JS array here to avoid relying on wasm-bindgen's unstable /// ABI. seen: WeakSet, /// Properties we don't want serialized. skip: Rc<HashSet<String>>, } impl Prettified { /// Skip printing the property with `name` if it exists on any object /// visited (transitively). pub fn skip_property(&mut self, name: &str) -> &mut Self { let mut with_name = HashSet::to_owned(&self.skip); with_name.insert(name.to_owned()); self.skip = Rc::new(with_name); self } fn child(&self, v: &JsValue) -> Self { Self { seen: self.seen.clone(), skip: self.skip.clone(), value: v.as_ref().clone() } } // TODO get a serde_json::Value from this too } impl Debug for Prettified { fn fmt(&self, f: &mut Formatter) -> FmtResult { // detect and break cycles before trying to figure out Object subclass // keeps a single path here rather than separately in each branch below let mut _reset = None; if let Some(obj) = self.value.dyn_ref::<Object>() { if self.seen.has(obj) { return write!(f, "[Cycle]"); } self.seen.add(obj); _reset = Some(scopeguard::guard(obj.to_owned(), |obj| { self.seen.delete(&obj); })); } if self.value.is_null() { write!(f, "null") } else if self.value.is_undefined() { write!(f, "undefined") } else if self.value.dyn_ref::<Function>().is_some() { JsFunction.fmt(f) } else if self.value.dyn_ref::<Promise>().is_some() { write!(f, "[Promise]") } else if self.value.dyn_ref::<Document>().is_some() { write!(f, "[Document]") } else if self.value.dyn_ref::<Window>().is_some() { write!(f, "[Window]") } else if let Some(s) = self.value.dyn_ref::<JsString>() { write!(f, "{:?}", s.as_string().unwrap()) } else if let Some(n) = self.value.as_f64() { write!(f, "{}", n) } else if let Some(b) = self.value.as_bool() { write!(f, "{:?}", b) } else if let Some(d) = self.value.dyn_ref::<Date>() { write!(f, "{}", d.to_iso_string().as_string().unwrap()) } else if let Some(d) = self.value.dyn_ref::<Element>() { let name = d.tag_name().to_ascii_lowercase(); let (mut class, mut id) = (d.class_name(), d.id()); if !class.is_empty() { class.insert_str(0, " ."); } if !id.is_empty() { id.insert_str(0, " #"); } write!(f, "<{}{}{}/>", name, id, class) } else if let Some(e) = self.value.dyn_ref::<Error>() { write!(f, "Error: {}", e.to_string().as_string().unwrap()) } else if let Some(r) = self.value.dyn_ref::<RegExp>() { write!(f, "/{}/", r.to_string().as_string().unwrap()) } else if let Some(s) = self.value.dyn_ref::<Symbol>() { write!(f, "{}", s.to_string().as_string().unwrap()) } else if let Some(a) = self.value.dyn_ref::<Array>() { let mut f = f.debug_list(); for val in a.iter() { f.entry(&self.child(&val)); } f.finish() } else if let Some(s) = self.value.dyn_ref::<Set>() { let mut f = f.debug_set(); let entries = s.entries(); while let Ok(next) = 
entries.next() { if next.done() { break; } f.entry(&self.child(&next.value())); } f.finish() } else if let Some(m) = self.value.dyn_ref::<Map>() { let mut f = f.debug_map(); let keys = m.keys(); while let Ok(next) = keys.next() { if next.done() { break; } let key = next.value(); let value = m.get(&key); f.entry(&self.child(&key), &self.child(&value)); } f.finish() } else if let Some(obj) = self.value.dyn_ref::<Object>() { let mut proto = obj.clone(); let mut props_seen = HashSet::new(); let name = obj.constructor().name().as_string().unwrap(); let mut f = f.debug_struct(&name); loop { let mut functions = BTreeSet::new(); let mut props = BTreeMap::new(); for raw_key in Object::get_own_property_names(&proto).iter() { let key = raw_key.as_string().expect("object keys are always strings"); if (key.starts_with("__") && key.ends_with("__")) || props_seen.contains(&key) || functions.contains(&key) || self.skip.contains(&key) { continue; } if let Ok(value) = Reflect::get(obj, &raw_key) { props_seen.insert(key.clone()); if value.is_function() { functions.insert(key); } else { props.insert(key, self.child(&value)); } } } for (key, value) in props { f.field(&key, &value); } for key in functions { f.field(&key, &JsFunction); } proto = Object::get_prototype_of(proto.as_ref()); if proto.is_falsy() || proto.constructor().name().as_string().unwrap() == "Object" { // we've reached the end of the prototype chain break; } } f.finish() } else { write!(f, "unknown ({:?})", &self.value) } } } impl Display for Prettified { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "{:#?}", self) } } struct JsFunction; impl Debug for JsFunction { fn fmt(&self, f: &mut Formatter) -> FmtResult { write!(f, "[Function]") } } #[cfg(test)] mod tests { use super::*; use futures::channel::oneshot::channel; use wasm_bindgen::closure::Closure; use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure}; use web_sys::{Event, EventTarget}; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] fn cycle_is_broken()
#[wasm_bindgen_test] fn repeated_siblings_are_not_cycles() { let with_siblings = js_sys::Function::new_no_args( r#" let root = { child: { nested: [] } }; let repeated_child = { foo: "bar" }; root.child.nested.push(repeated_child); root.child.nested.push(repeated_child); return root; "#, ) .call0(&JsValue::null()) .unwrap(); assert_eq!( with_siblings.pretty().to_string(), r#"Object { child: Object { nested: [ Object { foo: "bar", }, Object { foo: "bar", }, ], }, }"# ); } #[wasm_bindgen_test] async fn live_keyboard_event() { // create an input element and bind it to the document let window = web_sys::window().unwrap(); let document = window.document().unwrap(); let input = document.create_element("input").unwrap(); // input.set_attribute("type", "text").unwrap(); document.body().unwrap().append_child(input.as_ref()).unwrap(); // create & add an event listener that will send the event back the test let (send, recv) = channel(); let callback = Closure::once_into_js(move |ev: Event| { send.send(ev).unwrap(); }); let target: &EventTarget = input.as_ref(); let event_type = "keydown"; target.add_event_listener_with_callback(event_type, callback.dyn_ref().unwrap()).unwrap(); // create & dispatch an event to the input element let sent_event = web_sys::KeyboardEvent::new_with_keyboard_event_init_dict( event_type, web_sys::KeyboardEventInit::new() .char_code(b'F' as u32) .bubbles(true) .cancelable(true) .view(Some(&window)), ) .unwrap(); let sent: &Event = sent_event.as_ref(); assert!(target.dispatch_event(sent).unwrap()); // wait for the event to come back let received_event: Event = recv.await.unwrap(); // make sure we can print it without exploding due to nesting assert_eq!( received_event.pretty().skip_property("timeStamp").to_string(), r#"KeyboardEvent { isTrusted: false, DOM_KEY_LOCATION_LEFT: 1, DOM_KEY_LOCATION_NUMPAD: 3, DOM_KEY_LOCATION_RIGHT: 2, DOM_KEY_LOCATION_STANDARD: 0, altKey: false, charCode: 70, code: "", ctrlKey: false, isComposing: false, key: "", keyCode: 0, location: 0, metaKey: false, repeat: false, shiftKey: false, constructor: [Function], getModifierState: [Function], initKeyboardEvent: [Function], detail: 0, sourceCapabilities: null, view: [Window], which: 0, initUIEvent: [Function], AT_TARGET: 2, BUBBLING_PHASE: 3, CAPTURING_PHASE: 1, NONE: 0, bubbles: true, cancelBubble: false, cancelable: true, composed: false, currentTarget: null, defaultPrevented: false, eventPhase: 0, path: [ <input/>, <body/>, <html/>, [Document], [Window], ], returnValue: true, srcElement: <input/>, target: <input/>, type: "keydown", composedPath: [Function], initEvent: [Function], preventDefault: [Function], stopImmediatePropagation: [Function], stopPropagation: [Function], }"#, ); } }
{ let with_cycles = js_sys::Function::new_no_args( r#" let root = { child: { nested: [] } }; root.child.nested.push(root); return root; "#, ) .call0(&JsValue::null()) .unwrap(); assert_eq!( with_cycles.pretty().to_string(), r#"Object { child: Object { nested: [ [Cycle], ], }, }"# ); }
identifier_body
readfile.py
from java.awt import * from java.awt.event import ActionListener, ActionEvent from java.lang import Object from java.lang import Runnable from java.net import URL from java.util import Collection,HashSet,LinkedList,List,Set from javax.swing import * from javax.swing.table import AbstractTableModel from javax.swing.table import DefaultTableModel from org.openstreetmap.josm import Main from org.openstreetmap.josm.data import Preferences; Main.pref = Preferences() from org.openstreetmap.josm.data.osm import *; from org.openstreetmap.josm.data.validation import *; from org.openstreetmap.josm.tools import *; from org.openstreetmap.josm.tools.I18n.tr import * import org.openstreetmap.josm.Main as Main import org.openstreetmap.josm.command as Command import org.openstreetmap.josm.data.coor.LatLon as LatLon import org.openstreetmap.josm.data.osm.BBox as BBox import org.openstreetmap.josm.data.osm.DataSet as DataSet import org.openstreetmap.josm.data.osm.Node as Node import org.openstreetmap.josm.data.osm.TagCollection as TagCollection import org.openstreetmap.josm.data.osm.Way as Way import time from org.openstreetmap.josm.tools.I18n.tr import * import java.awt.Component import java.io.File as File; import java.io.FileInputStream as FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collection; import org.openstreetmap.josm.data.Preferences as Preferences; import org.openstreetmap.josm.data.osm.OsmPrimitive as OsmPrimitive; import org.openstreetmap.josm.data.projection.Projection; import org.openstreetmap.josm.Main as Main; import org.openstreetmap.josm.gui.layer.OsmDataLayer; import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor; import org.openstreetmap.josm.gui.progress.ProgressMonitor; import org.openstreetmap.josm.io.IllegalDataException; import org.openstreetmap.josm.io.OsmImporter as OsmImporter; import org.openstreetmap.josm.io.OsmImporter.OsmImporterData; import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice; import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference; from java.awt.event import KeyEvent from javax.swing import ImageIcon from javax.swing import JMenu from javax.swing import JMenuBar from javax.swing import JMenuItem from java.awt.event import MouseListener from java.awt.event import KeyListener class ObjectTableModel(AbstractTableModel): __columns__ = () def __init__(self, delegate, columns): AbstractTableModel.__init__(self) self.__columns__ = columns self.delegate= delegate self._getters = [None] * len(self.__columns__) for index, column in enumerate(self.__columns__): self.__columns__[index] = self._validateColumn(column, index) def _fireItemsChanged(self, start, end): self.fireTableRowsUpdated(start, end) def _fireItemsAdded(self, start, end): self.fireTableRowsInserted(start, end) def _fireItemsRemoved(self, start, end): self.fireTableRowsDeleted(start, end) def setDelegate(self, value): self._delegate = value self.fireTableDataChanged() def getColumnCount(self): return len(self.__columns__) def getRowCount(self): n= len(self.delegate) # print "row count %d " % n return n def getColumnClass(self, columnIndex): return basestring # return self.__columns__[columnIndex][1] def getColumnName(self, columnIndex): return self.__columns__[columnIndex][0] def setValueAt(self, aValue, rowIndex, columnIndex): self[rowIndex][columnIndex] = aValue def refresh(self): if len(self) > 0: self.fireTableRowsUpdated(0, len(self) - 1) def 
_validateColumn(self, column, index): #column = DelegateTableModel._validateColumn(self, column, index) self._getters[index] = lambda row: row.get(column[2]) return column def getValueAt(self, rowIndex, columnIndex): print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex) #line = self.delegate[rowIndex] return self.delegate[rowIndex].get(self.__columns__[columnIndex][1]) #return self._getters[columnIndex](line) def setValueAt(self, aValue, rowIndex, columnIndex): attrname = self.__columns__[columnIndex][2] setattr(self[rowIndex], attrname, aValue) self.fireTableCellUpdated(rowIndex, columnIndex) def getObjectIndex(self, obj): for i, row in enumerate(self): if row == obj: return i return - 1 def getSelectedObject(self, table): assert table.model is self if table.selectedRow >= 0: modelRow = table.convertRowIndexToModel(table.selectedRow) return self[modelRow] def getSelectedObjects(self, table): assert table.model is self selected = [] for viewRow in table.selectedRows: modelRow = table.convertRowIndexToModel(viewRow) selected.append(self[modelRow]) return selected def getVisibleObjects(self, table): assert table.model is self visible = [] for viewRow in xrange(table.rowCount): modelRow = table.convertRowIndexToModel(viewRow) visible.append(self[modelRow]) return visible #def EventListener(): class MyListener (MouseListener,KeyListener ) : def __init__(self, table): self.table=table # def mouseReleased(self, e): # print("Mouse released; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) def keyPressed( self,e) : print("key pressed; " + str(e.getKeyChar())) # print self.table # r = self.table.getSelectedRow () # print r rs= self.table.getSelectedRows() if (e.getKeyChar() == 'l') : print("lookup; ") for r in rs : print r obj=self.table.getValueAt(r,0) print obj #d = LookupDialog() # def mouseReleased(self,e) : # print("Mouse released; # of clicks: " + str(e.getClickCount())) def mouseClicked(self, e): # print("Mouse clicked; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) if (e.getButton() ==3) : print "Mouse3 clicked; # of clicks: " + str(e.getClickCount()) else : if (e.getButton() ==2): print "Mouse2 clicked; # of clicks: " + str(e.getClickCount()) else: if (e.getButton() ==1): print "Mouse1 clicked; # of clicks: " + str(e.getClickCount()) class MyFrame (JFrame ) : def __init__(self,name): super(MyFrame, self).__init__(name) def LookupEvent(self, event) : print self print event def DisplayTable (collection): columns=list( ( ("Street","addr:street"), ("Num","addr:housenumber") ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("street") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def DisplayStreetTable (collection): columns=list( ( ("Name","name"), ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("Street Table") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) # copyButton = JButton('Merge') 
#,actionPerformed=self.noAction # frame.add(copyButton) listener=MyListener(table) table.addMouseListener(listener) table.addKeyListener(listener) menubar = JMenuBar() file = JMenu("Edit") file.setMnemonic(KeyEvent.VK_E) lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent) lookup.setMnemonic(KeyEvent.VK_L) file.add(lookup) menubar.add(file) frame.setJMenuBar(menubar) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def isBuilding(p): v = p.get("building"); if v is not None and v != "no" and v != "entrance": return True else: return False class BuildingInBuilding : BUILDING_INSIDE_BUILDING = 2001; def __init__ (self): self.primitivesToCheck = LinkedList(); self.index = QuadBuckets(); print 'building in building' #super(tr("Building inside building"), tr("Checks for building areas inside of buildings.")); def visitn(self,n) : # print "visitn:" # print n if (n.isUsable() and isBuilding(n)) : if not self.primitivesToCheck.contains(n): # print "adding :" n self.primitivesToCheck.add(n); else: print "duplicate p :" # print n def visitw(self,w) :
def isInPolygon(n, polygon) : return Geometry.nodeInsidePolygon(n, polygon); def sameLayers( w1, w2) : if w1.get("layer") is not None : l1 = w1.get("layer") else : l1 = "0"; if w2.get("layer") is not None : l2 = w2.get("layer") else : l2 ="0"; return l1.equals(l2); def evaluateNode(self,p,obj): print "te" # print p # print obj def endTest2(self): for p in self.primitivesToCheck : collection = self.index.search(p.getBBox()) for object in collection: if (not p.equals(object)): if (isinstance(p,Node)): self.evaluateNode(p, object) else : print p # else if (p instanceof Way) # return evaluateWay((Way) p, object); # else if (p instanceof Relation) # return evaluateRelation((Relation) p, object); # return false; def endTest(self) : print "end" # bbox = BBox(-180,-90,180,90) bbox = BBox(-1000,-900,1800,900) print self.index collection = self.index.search(bbox) # print collection def projection() : print "projection" pc = ProjectionPreference.mercator id = pc.getId() pref = None Main.pref.putCollection("projection.sub."+id, pref) pc.setPreferences(pref) proj = pc.getProjection() Main.setProjection(proj) def prefs() : print "prefs" Main.pref = Preferences() Main.pref.put("tags.reversed_direction", "false") class JythonWay(): def __init__(self,x): self.way=x self.subobjects=[] pass # todo: def lookup(self) : # lookup this street name on the internet print "ToDO" # todo: def merge(self) : # merge these two streets, fix the names print "ToDO fix the streets" def get (self,k): return self.way.get(k) def name(self): return self.way.get('name') def addsubobject(self, other): return self.subobjects.append(other) import re pattern = re.compile(r'\s+') def streetlist(objs) : objs2 = [] streets = {} for p in objs: if (not isinstance(p,Way)): continue s=p.get('name') hw=p.get('highway') if (s is None): continue if (hw is None): continue s = s.lower() s=re.sub(pattern, '', s) #remove whitespace if not s in streets : # print "%s is new" % s streets[s]=JythonWay(p) else : streets[s].addsubobject(p) # print streets.values() objs3= sorted(streets.values(),(lambda x, y: (cmp(x.name(), y.name())))) DisplayStreetTable(objs3) def main (): print "main" prefs(); projection(); importer = OsmImporter() fileObj= File('/home/mdupont/experiments/josm/topeka/noto.osm') # fileObj= File('/home/mdupont/experiments/josm/topeka/topeka.osm') inobj = FileInputStream(fileObj); data = importer.loadLayer(inobj, fileObj, fileObj.getName(), NullProgressMonitor.INSTANCE) s = data.toString(); # print s primitives = data.getLayer().data.allPrimitives(); # print primitives objs= primitives.toArray() #DisplayTable(obj) streetlist(objs) # make a list of the street objects main();
print "visitw:" # print w if (w.isUsable() and w.isClosed() and isBuilding(w)) : self.primitivesToCheck.add(w) self.index.add(w) print "added"
identifier_body
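The row above is one fill-in-the-middle sample: the prefix, suffix, and middle fields carve readfile.py into three pieces, and fim_type (identifier_body) records that the masked span is the body of a definition, here BuildingInBuilding.visitw. A minimal sketch, assuming each row is a plain dict of strings, of how the original source can be put back together for inspection:

```python
def reassemble(row):
    """Rebuild the original file text from one FIM row.

    Assumes 'prefix', 'middle' and 'suffix' are plain strings, as in
    the readfile.py samples in this dump; the stored column order
    (prefix, suffix, middle) does not matter for reassembly.
    """
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical miniature row shaped like the identifier_body sample.
row = {
    "prefix": "def visitw(self, w):\n",
    "middle": "    if w.isUsable() and w.isClosed() and isBuilding(w):\n"
              "        self.primitivesToCheck.add(w)\n"
              "        self.index.add(w)\n",
    "suffix": "\ndef isInPolygon(n, polygon):\n"
              "    return Geometry.nodeInsidePolygon(n, polygon)\n",
    "fim_type": "identifier_body",
}
print(reassemble(row))
```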
readfile.py
from java.awt import * from java.awt.event import ActionListener, ActionEvent from java.lang import Object from java.lang import Runnable from java.net import URL from java.util import Collection,HashSet,LinkedList,List,Set from javax.swing import * from javax.swing.table import AbstractTableModel from javax.swing.table import DefaultTableModel from org.openstreetmap.josm import Main from org.openstreetmap.josm.data import Preferences; Main.pref = Preferences() from org.openstreetmap.josm.data.osm import *; from org.openstreetmap.josm.data.validation import *; from org.openstreetmap.josm.tools import *; from org.openstreetmap.josm.tools.I18n.tr import * import org.openstreetmap.josm.Main as Main import org.openstreetmap.josm.command as Command import org.openstreetmap.josm.data.coor.LatLon as LatLon import org.openstreetmap.josm.data.osm.BBox as BBox import org.openstreetmap.josm.data.osm.DataSet as DataSet import org.openstreetmap.josm.data.osm.Node as Node import org.openstreetmap.josm.data.osm.TagCollection as TagCollection import org.openstreetmap.josm.data.osm.Way as Way import time from org.openstreetmap.josm.tools.I18n.tr import * import java.awt.Component import java.io.File as File; import java.io.FileInputStream as FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collection; import org.openstreetmap.josm.data.Preferences as Preferences; import org.openstreetmap.josm.data.osm.OsmPrimitive as OsmPrimitive; import org.openstreetmap.josm.data.projection.Projection; import org.openstreetmap.josm.Main as Main; import org.openstreetmap.josm.gui.layer.OsmDataLayer; import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor; import org.openstreetmap.josm.gui.progress.ProgressMonitor; import org.openstreetmap.josm.io.IllegalDataException; import org.openstreetmap.josm.io.OsmImporter as OsmImporter; import org.openstreetmap.josm.io.OsmImporter.OsmImporterData; import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice; import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference; from java.awt.event import KeyEvent from javax.swing import ImageIcon from javax.swing import JMenu from javax.swing import JMenuBar from javax.swing import JMenuItem from java.awt.event import MouseListener from java.awt.event import KeyListener class ObjectTableModel(AbstractTableModel): __columns__ = () def __init__(self, delegate, columns): AbstractTableModel.__init__(self) self.__columns__ = columns self.delegate= delegate self._getters = [None] * len(self.__columns__) for index, column in enumerate(self.__columns__): self.__columns__[index] = self._validateColumn(column, index) def _fireItemsChanged(self, start, end): self.fireTableRowsUpdated(start, end) def _fireItemsAdded(self, start, end): self.fireTableRowsInserted(start, end) def _fireItemsRemoved(self, start, end): self.fireTableRowsDeleted(start, end) def setDelegate(self, value): self._delegate = value self.fireTableDataChanged() def getColumnCount(self): return len(self.__columns__) def getRowCount(self): n= len(self.delegate) # print "row count %d " % n return n def getColumnClass(self, columnIndex): return basestring # return self.__columns__[columnIndex][1] def getColumnName(self, columnIndex): return self.__columns__[columnIndex][0] def setValueAt(self, aValue, rowIndex, columnIndex): self[rowIndex][columnIndex] = aValue def refresh(self): if len(self) > 0: self.fireTableRowsUpdated(0, len(self) - 1) def 
_validateColumn(self, column, index): #column = DelegateTableModel._validateColumn(self, column, index) self._getters[index] = lambda row: row.get(column[2]) return column def getValueAt(self, rowIndex, columnIndex): print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex) #line = self.delegate[rowIndex] return self.delegate[rowIndex].get(self.__columns__[columnIndex][1]) #return self._getters[columnIndex](line) def setValueAt(self, aValue, rowIndex, columnIndex): attrname = self.__columns__[columnIndex][2] setattr(self[rowIndex], attrname, aValue) self.fireTableCellUpdated(rowIndex, columnIndex) def getObjectIndex(self, obj): for i, row in enumerate(self): if row == obj: return i return - 1 def getSelectedObject(self, table): assert table.model is self if table.selectedRow >= 0: modelRow = table.convertRowIndexToModel(table.selectedRow) return self[modelRow] def getSelectedObjects(self, table): assert table.model is self selected = [] for viewRow in table.selectedRows: modelRow = table.convertRowIndexToModel(viewRow) selected.append(self[modelRow]) return selected def getVisibleObjects(self, table): assert table.model is self visible = [] for viewRow in xrange(table.rowCount): modelRow = table.convertRowIndexToModel(viewRow) visible.append(self[modelRow]) return visible #def EventListener(): class MyListener (MouseListener,KeyListener ) : def __init__(self, table): self.table=table # def mouseReleased(self, e): # print("Mouse released; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) def keyPressed( self,e) : print("key pressed; " + str(e.getKeyChar())) # print self.table # r = self.table.getSelectedRow () # print r rs= self.table.getSelectedRows() if (e.getKeyChar() == 'l') : print("lookup; ") for r in rs : print r obj=self.table.getValueAt(r,0) print obj #d = LookupDialog() # def mouseReleased(self,e) : # print("Mouse released; # of clicks: " + str(e.getClickCount())) def mouseClicked(self, e): # print("Mouse clicked; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) if (e.getButton() ==3) : print "Mouse3 clicked; # of clicks: " + str(e.getClickCount()) else : if (e.getButton() ==2): print "Mouse2 clicked; # of clicks: " + str(e.getClickCount()) else: if (e.getButton() ==1): print "Mouse1 clicked; # of clicks: " + str(e.getClickCount()) class MyFrame (JFrame ) : def __init__(self,name): super(MyFrame, self).__init__(name) def LookupEvent(self, event) : print self print event def DisplayTable (collection): columns=list( ( ("Street","addr:street"), ("Num","addr:housenumber") ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("street") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def DisplayStreetTable (collection): columns=list( ( ("Name","name"), ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("Street Table") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) # copyButton = JButton('Merge') 
#,actionPerformed=self.noAction # frame.add(copyButton) listener=MyListener(table) table.addMouseListener(listener) table.addKeyListener(listener) menubar = JMenuBar() file = JMenu("Edit") file.setMnemonic(KeyEvent.VK_E) lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent) lookup.setMnemonic(KeyEvent.VK_L) file.add(lookup) menubar.add(file) frame.setJMenuBar(menubar) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def isBuilding(p): v = p.get("building"); if v is not None and v != "no" and v != "entrance": return True else: return False class BuildingInBuilding : BUILDING_INSIDE_BUILDING = 2001; def __init__ (self): self.primitivesToCheck = LinkedList(); self.index = QuadBuckets(); print 'building in building' #super(tr("Building inside building"), tr("Checks for building areas inside of buildings.")); def visitn(self,n) : # print "visitn:" # print n if (n.isUsable() and isBuilding(n)) : if not self.primitivesToCheck.contains(n): # print "adding :" n self.primitivesToCheck.add(n); else: print "duplicate p :" # print n def visitw(self,w) : print "visitw:" # print w if (w.isUsable() and w.isClosed() and isBuilding(w)) : self.primitivesToCheck.add(w) self.index.add(w) print "added" def isInPolygon(n, polygon) : return Geometry.nodeInsidePolygon(n, polygon); def
( w1, w2) : if w1.get("layer") is not None : l1 = w1.get("layer") else : l1 = "0"; if w2.get("layer") is not None : l2 = w2.get("layer") else : l2 ="0"; return l1.equals(l2); def evaluateNode(self,p,obj): print "te" # print p # print obj def endTest2(self): for p in self.primitivesToCheck : collection = self.index.search(p.getBBox()) for object in collection: if (not p.equals(object)): if (isinstance(p,Node)): self.evaluateNode(p, object) else : print p # else if (p instanceof Way) # return evaluateWay((Way) p, object); # else if (p instanceof Relation) # return evaluateRelation((Relation) p, object); # return false; def endTest(self) : print "end" # bbox = BBox(-180,-90,180,90) bbox = BBox(-1000,-900,1800,900) print self.index collection = self.index.search(bbox) # print collection def projection() : print "projection" pc = ProjectionPreference.mercator id = pc.getId() pref = None Main.pref.putCollection("projection.sub."+id, pref) pc.setPreferences(pref) proj = pc.getProjection() Main.setProjection(proj) def prefs() : print "prefs" Main.pref = Preferences() Main.pref.put("tags.reversed_direction", "false") class JythonWay(): def __init__(self,x): self.way=x self.subobjects=[] pass # todo: def lookup(self) : # lookup this street name on the internet print "ToDO" # todo: def merge(self) : # merge these two streets, fix the names print "ToDO fix the streets" def get (self,k): return self.way.get(k) def name(self): return self.way.get('name') def addsubobject(self, other): return self.subobjects.append(other) import re pattern = re.compile(r'\s+') def streetlist(objs) : objs2 = [] streets = {} for p in objs: if (not isinstance(p,Way)): continue s=p.get('name') hw=p.get('highway') if (s is None): continue if (hw is None): continue s = s.lower() s=re.sub(pattern, '', s) #remove whitespace if not s in streets : # print "%s is new" % s streets[s]=JythonWay(p) else : streets[s].addsubobject(p) # print streets.values() objs3= sorted(streets.values(),(lambda x, y: (cmp(x.name(), y.name())))) DisplayStreetTable(objs3) def main (): print "main" prefs(); projection(); importer = OsmImporter() fileObj= File('/home/mdupont/experiments/josm/topeka/noto.osm') # fileObj= File('/home/mdupont/experiments/josm/topeka/topeka.osm') inobj = FileInputStream(fileObj); data = importer.loadLayer(inobj, fileObj, fileObj.getName(), NullProgressMonitor.INSTANCE) s = data.toString(); # print s primitives = data.getLayer().data.allPrimitives(); # print primitives objs= primitives.toArray() #DisplayTable(obj) streetlist(objs) # make a list of the street objects main();
sameLayers
identifier_name
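Here the masked span (fim_type identifier_name) is just the name sameLayers; the surrounding suffix shows the helper treating a missing layer tag as "0" before comparing two ways. A standalone sketch of the same check, assuming the ways behave like dicts with a get() method:

```python
def same_layers(w1, w2):
    """True when two ways are on the same layer; a missing 'layer'
    tag defaults to "0", mirroring the sameLayers helper above."""
    l1 = w1.get("layer") if w1.get("layer") is not None else "0"
    l2 = w2.get("layer") if w2.get("layer") is not None else "0"
    return l1 == l2

# Plain dicts standing in for JOSM ways.
assert same_layers({"layer": "1"}, {"layer": "1"})
assert same_layers({}, {"layer": "0"})        # missing tag treated as "0"
assert not same_layers({"layer": "1"}, {})
```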
readfile.py
from java.awt import * from java.awt.event import ActionListener, ActionEvent from java.lang import Object from java.lang import Runnable from java.net import URL from java.util import Collection,HashSet,LinkedList,List,Set from javax.swing import * from javax.swing.table import AbstractTableModel from javax.swing.table import DefaultTableModel from org.openstreetmap.josm import Main from org.openstreetmap.josm.data import Preferences; Main.pref = Preferences() from org.openstreetmap.josm.data.osm import *; from org.openstreetmap.josm.data.validation import *; from org.openstreetmap.josm.tools import *; from org.openstreetmap.josm.tools.I18n.tr import * import org.openstreetmap.josm.Main as Main import org.openstreetmap.josm.command as Command import org.openstreetmap.josm.data.coor.LatLon as LatLon import org.openstreetmap.josm.data.osm.BBox as BBox import org.openstreetmap.josm.data.osm.DataSet as DataSet import org.openstreetmap.josm.data.osm.Node as Node import org.openstreetmap.josm.data.osm.TagCollection as TagCollection import org.openstreetmap.josm.data.osm.Way as Way import time from org.openstreetmap.josm.tools.I18n.tr import * import java.awt.Component import java.io.File as File; import java.io.FileInputStream as FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collection; import org.openstreetmap.josm.data.Preferences as Preferences; import org.openstreetmap.josm.data.osm.OsmPrimitive as OsmPrimitive; import org.openstreetmap.josm.data.projection.Projection; import org.openstreetmap.josm.Main as Main; import org.openstreetmap.josm.gui.layer.OsmDataLayer; import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor; import org.openstreetmap.josm.gui.progress.ProgressMonitor; import org.openstreetmap.josm.io.IllegalDataException; import org.openstreetmap.josm.io.OsmImporter as OsmImporter; import org.openstreetmap.josm.io.OsmImporter.OsmImporterData; import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice; import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference; from java.awt.event import KeyEvent from javax.swing import ImageIcon from javax.swing import JMenu from javax.swing import JMenuBar from javax.swing import JMenuItem from java.awt.event import MouseListener from java.awt.event import KeyListener class ObjectTableModel(AbstractTableModel): __columns__ = () def __init__(self, delegate, columns): AbstractTableModel.__init__(self) self.__columns__ = columns self.delegate= delegate self._getters = [None] * len(self.__columns__) for index, column in enumerate(self.__columns__): self.__columns__[index] = self._validateColumn(column, index) def _fireItemsChanged(self, start, end): self.fireTableRowsUpdated(start, end) def _fireItemsAdded(self, start, end): self.fireTableRowsInserted(start, end) def _fireItemsRemoved(self, start, end): self.fireTableRowsDeleted(start, end) def setDelegate(self, value): self._delegate = value self.fireTableDataChanged() def getColumnCount(self): return len(self.__columns__) def getRowCount(self): n= len(self.delegate) # print "row count %d " % n return n def getColumnClass(self, columnIndex): return basestring # return self.__columns__[columnIndex][1] def getColumnName(self, columnIndex): return self.__columns__[columnIndex][0] def setValueAt(self, aValue, rowIndex, columnIndex): self[rowIndex][columnIndex] = aValue def refresh(self): if len(self) > 0: self.fireTableRowsUpdated(0, len(self) - 1) def 
_validateColumn(self, column, index): #column = DelegateTableModel._validateColumn(self, column, index) self._getters[index] = lambda row: row.get(column[2]) return column def getValueAt(self, rowIndex, columnIndex): print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex) #line = self.delegate[rowIndex] return self.delegate[rowIndex].get(self.__columns__[columnIndex][1]) #return self._getters[columnIndex](line) def setValueAt(self, aValue, rowIndex, columnIndex): attrname = self.__columns__[columnIndex][2] setattr(self[rowIndex], attrname, aValue) self.fireTableCellUpdated(rowIndex, columnIndex) def getObjectIndex(self, obj): for i, row in enumerate(self): if row == obj: return i return - 1 def getSelectedObject(self, table): assert table.model is self if table.selectedRow >= 0: modelRow = table.convertRowIndexToModel(table.selectedRow) return self[modelRow] def getSelectedObjects(self, table): assert table.model is self selected = [] for viewRow in table.selectedRows: modelRow = table.convertRowIndexToModel(viewRow) selected.append(self[modelRow]) return selected def getVisibleObjects(self, table): assert table.model is self visible = [] for viewRow in xrange(table.rowCount): modelRow = table.convertRowIndexToModel(viewRow) visible.append(self[modelRow]) return visible #def EventListener(): class MyListener (MouseListener,KeyListener ) : def __init__(self, table): self.table=table # def mouseReleased(self, e): # print("Mouse released; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) def keyPressed( self,e) : print("key pressed; " + str(e.getKeyChar())) # print self.table # r = self.table.getSelectedRow () # print r rs= self.table.getSelectedRows() if (e.getKeyChar() == 'l') : print("lookup; ") for r in rs : print r obj=self.table.getValueAt(r,0) print obj #d = LookupDialog() # def mouseReleased(self,e) : # print("Mouse released; # of clicks: " + str(e.getClickCount())) def mouseClicked(self, e): # print("Mouse clicked; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) if (e.getButton() ==3) : print "Mouse3 clicked; # of clicks: " + str(e.getClickCount()) else : if (e.getButton() ==2): print "Mouse2 clicked; # of clicks: " + str(e.getClickCount()) else: if (e.getButton() ==1): print "Mouse1 clicked; # of clicks: " + str(e.getClickCount()) class MyFrame (JFrame ) : def __init__(self,name): super(MyFrame, self).__init__(name) def LookupEvent(self, event) : print self print event def DisplayTable (collection): columns=list( ( ("Street","addr:street"), ("Num","addr:housenumber") ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("street") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def DisplayStreetTable (collection): columns=list( ( ("Name","name"), ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("Street Table") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) # copyButton = JButton('Merge') 
#,actionPerformed=self.noAction # frame.add(copyButton) listener=MyListener(table) table.addMouseListener(listener) table.addKeyListener(listener) menubar = JMenuBar() file = JMenu("Edit") file.setMnemonic(KeyEvent.VK_E) lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent) lookup.setMnemonic(KeyEvent.VK_L) file.add(lookup) menubar.add(file) frame.setJMenuBar(menubar) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def isBuilding(p): v = p.get("building"); if v is not None and v != "no" and v != "entrance": return True else: return False class BuildingInBuilding : BUILDING_INSIDE_BUILDING = 2001; def __init__ (self): self.primitivesToCheck = LinkedList(); self.index = QuadBuckets(); print 'building in building' #super(tr("Building inside building"), tr("Checks for building areas inside of buildings.")); def visitn(self,n) : # print "visitn:" # print n if (n.isUsable() and isBuilding(n)) : if not self.primitivesToCheck.contains(n): # print "adding :" n self.primitivesToCheck.add(n); else: print "duplicate p :" # print n def visitw(self,w) : print "visitw:" # print w if (w.isUsable() and w.isClosed() and isBuilding(w)) : self.primitivesToCheck.add(w) self.index.add(w) print "added" def isInPolygon(n, polygon) : return Geometry.nodeInsidePolygon(n, polygon); def sameLayers( w1, w2) : if w1.get("layer") is not None : l1 = w1.get("layer") else : l1 = "0"; if w2.get("layer") is not None : l2 = w2.get("layer") else : l2 ="0"; return l1.equals(l2); def evaluateNode(self,p,obj): print "te" # print p # print obj def endTest2(self): for p in self.primitivesToCheck : collection = self.index.search(p.getBBox()) for object in collection: if (not p.equals(object)): if (isinstance(p,Node)): self.evaluateNode(p, object) else : print p # else if (p instanceof Way) # return evaluateWay((Way) p, object); # else if (p instanceof Relation) # return evaluateRelation((Relation) p, object); # return false; def endTest(self) : print "end" # bbox = BBox(-180,-90,180,90) bbox = BBox(-1000,-900,1800,900) print self.index collection = self.index.search(bbox) # print collection def projection() : print "projection" pc = ProjectionPreference.mercator id = pc.getId() pref = None Main.pref.putCollection("projection.sub."+id, pref) pc.setPreferences(pref) proj = pc.getProjection() Main.setProjection(proj) def prefs() : print "prefs" Main.pref = Preferences() Main.pref.put("tags.reversed_direction", "false") class JythonWay(): def __init__(self,x): self.way=x self.subobjects=[] pass # todo: def lookup(self) : # lookup this street name on the internet print "ToDO" # todo: def merge(self) : # merge these two streets, fix the names print "ToDO fix the streets" def get (self,k): return self.way.get(k) def name(self): return self.way.get('name') def addsubobject(self, other): return self.subobjects.append(other) import re pattern = re.compile(r'\s+') def streetlist(objs) : objs2 = [] streets = {} for p in objs: if (not isinstance(p,Way)): continue s=p.get('name') hw=p.get('highway') if (s is None): continue if (hw is None):
s = s.lower() s=re.sub(pattern, '', s) #remove whitespace if not s in streets : # print "%s is new" % s streets[s]=JythonWay(p) else : streets[s].addsubobject(p) # print streets.values() objs3= sorted(streets.values(),(lambda x, y: (cmp(x.name(), y.name())))) DisplayStreetTable(objs3) def main (): print "main" prefs(); projection(); importer = OsmImporter() fileObj= File('/home/mdupont/experiments/josm/topeka/noto.osm') # fileObj= File('/home/mdupont/experiments/josm/topeka/topeka.osm') inobj = FileInputStream(fileObj); data = importer.loadLayer(inobj, fileObj, fileObj.getName(), NullProgressMonitor.INSTANCE) s = data.toString(); # print s primitives = data.getLayer().data.allPrimitives(); # print primitives objs= primitives.toArray() #DisplayTable(obj) streetlist(objs) # make a list of the street objects main();
continue
conditional_block
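In this row the masked conditional_block is the `continue` that skips ways without a highway tag inside streetlist. That function then lower-cases the street name, strips its whitespace, and groups ways so each normalised name maps to one entry. A compact sketch of that grouping with plain dicts, assuming a get()-style tag lookup:

```python
import re

_WS = re.compile(r"\s+")

def group_streets(ways):
    """Group highway ways by normalised street name, mirroring
    streetlist(): ways without a name or highway tag are skipped,
    the name is lower-cased and all whitespace is removed."""
    streets = {}
    for w in ways:
        name, highway = w.get("name"), w.get("highway")
        if name is None or highway is None:
            continue  # the masked conditional_block in the row above
        key = _WS.sub("", name.lower())
        streets.setdefault(key, []).append(w)
    return streets

ways = [
    {"name": "Main St", "highway": "residential"},
    {"name": "main st", "highway": "residential"},
    {"name": "Main St"},                      # no highway tag: skipped
]
assert len(group_streets(ways)) == 1
```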
readfile.py
from java.awt import * from java.awt.event import ActionListener, ActionEvent from java.lang import Object from java.lang import Runnable from java.net import URL from java.util import Collection,HashSet,LinkedList,List,Set from javax.swing import * from javax.swing.table import AbstractTableModel from javax.swing.table import DefaultTableModel from org.openstreetmap.josm import Main from org.openstreetmap.josm.data import Preferences; Main.pref = Preferences() from org.openstreetmap.josm.data.osm import *; from org.openstreetmap.josm.data.validation import *; from org.openstreetmap.josm.tools import *; from org.openstreetmap.josm.tools.I18n.tr import * import org.openstreetmap.josm.Main as Main import org.openstreetmap.josm.command as Command import org.openstreetmap.josm.data.coor.LatLon as LatLon import org.openstreetmap.josm.data.osm.BBox as BBox import org.openstreetmap.josm.data.osm.DataSet as DataSet import org.openstreetmap.josm.data.osm.Node as Node import org.openstreetmap.josm.data.osm.TagCollection as TagCollection import org.openstreetmap.josm.data.osm.Way as Way import time from org.openstreetmap.josm.tools.I18n.tr import * import java.awt.Component import java.io.File as File; import java.io.FileInputStream as FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Collection; import org.openstreetmap.josm.data.Preferences as Preferences; import org.openstreetmap.josm.data.osm.OsmPrimitive as OsmPrimitive; import org.openstreetmap.josm.data.projection.Projection; import org.openstreetmap.josm.Main as Main; import org.openstreetmap.josm.gui.layer.OsmDataLayer; import org.openstreetmap.josm.gui.progress.NullProgressMonitor as NullProgressMonitor; import org.openstreetmap.josm.gui.progress.ProgressMonitor; import org.openstreetmap.josm.io.IllegalDataException; import org.openstreetmap.josm.io.OsmImporter as OsmImporter; import org.openstreetmap.josm.io.OsmImporter.OsmImporterData; import org.openstreetmap.josm.gui.preferences.projection.ProjectionChoice; import org.openstreetmap.josm.gui.preferences.projection.ProjectionPreference as ProjectionPreference; from java.awt.event import KeyEvent from javax.swing import ImageIcon from javax.swing import JMenu from javax.swing import JMenuBar from javax.swing import JMenuItem from java.awt.event import MouseListener from java.awt.event import KeyListener class ObjectTableModel(AbstractTableModel): __columns__ = () def __init__(self, delegate, columns): AbstractTableModel.__init__(self) self.__columns__ = columns self.delegate= delegate self._getters = [None] * len(self.__columns__) for index, column in enumerate(self.__columns__): self.__columns__[index] = self._validateColumn(column, index) def _fireItemsChanged(self, start, end): self.fireTableRowsUpdated(start, end) def _fireItemsAdded(self, start, end): self.fireTableRowsInserted(start, end) def _fireItemsRemoved(self, start, end): self.fireTableRowsDeleted(start, end) def setDelegate(self, value): self._delegate = value self.fireTableDataChanged() def getColumnCount(self): return len(self.__columns__) def getRowCount(self): n= len(self.delegate) # print "row count %d " % n return n def getColumnClass(self, columnIndex): return basestring # return self.__columns__[columnIndex][1] def getColumnName(self, columnIndex): return self.__columns__[columnIndex][0] def setValueAt(self, aValue, rowIndex, columnIndex): self[rowIndex][columnIndex] = aValue def refresh(self): if len(self) > 0: self.fireTableRowsUpdated(0, len(self) - 1) def 
_validateColumn(self, column, index): #column = DelegateTableModel._validateColumn(self, column, index) self._getters[index] = lambda row: row.get(column[2]) return column def getValueAt(self, rowIndex, columnIndex): print "getValueAt " + str(rowIndex) + ":"+ str(columnIndex) #line = self.delegate[rowIndex] return self.delegate[rowIndex].get(self.__columns__[columnIndex][1]) #return self._getters[columnIndex](line) def setValueAt(self, aValue, rowIndex, columnIndex): attrname = self.__columns__[columnIndex][2] setattr(self[rowIndex], attrname, aValue) self.fireTableCellUpdated(rowIndex, columnIndex) def getObjectIndex(self, obj): for i, row in enumerate(self): if row == obj: return i return - 1 def getSelectedObject(self, table): assert table.model is self if table.selectedRow >= 0: modelRow = table.convertRowIndexToModel(table.selectedRow) return self[modelRow] def getSelectedObjects(self, table): assert table.model is self selected = [] for viewRow in table.selectedRows: modelRow = table.convertRowIndexToModel(viewRow) selected.append(self[modelRow]) return selected def getVisibleObjects(self, table): assert table.model is self visible = [] for viewRow in xrange(table.rowCount): modelRow = table.convertRowIndexToModel(viewRow) visible.append(self[modelRow]) return visible #def EventListener(): class MyListener (MouseListener,KeyListener ) : def __init__(self, table): self.table=table # def mouseReleased(self, e): # print("Mouse released; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) def keyPressed( self,e) : print("key pressed; " + str(e.getKeyChar())) # print self.table # r = self.table.getSelectedRow () # print r rs= self.table.getSelectedRows() if (e.getKeyChar() == 'l') : print("lookup; ") for r in rs : print r obj=self.table.getValueAt(r,0) print obj #d = LookupDialog() # def mouseReleased(self,e) : # print("Mouse released; # of clicks: " + str(e.getClickCount())) def mouseClicked(self, e): # print("Mouse clicked; # of clicks: " + str(e.getClickCount())) # print("button: " + str(e.getButton())) if (e.getButton() ==3) : print "Mouse3 clicked; # of clicks: " + str(e.getClickCount()) else : if (e.getButton() ==2): print "Mouse2 clicked; # of clicks: " + str(e.getClickCount()) else: if (e.getButton() ==1): print "Mouse1 clicked; # of clicks: " + str(e.getClickCount()) class MyFrame (JFrame ) : def __init__(self,name): super(MyFrame, self).__init__(name) def LookupEvent(self, event) : print self print event def DisplayTable (collection): columns=list( ( ("Street","addr:street"), ("Num","addr:housenumber") ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("street") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def DisplayStreetTable (collection): columns=list( ( ("Name","name"), ) ) tm= ObjectTableModel(collection,columns) frame = MyFrame("Street Table") frame.setSize(800, 1200) frame.setLayout(BorderLayout()) table = JTable(tm) table.setAutoResizeMode(JTable.AUTO_RESIZE_ALL_COLUMNS) header = table.getTableHeader() header.setUpdateTableInRealTime(True) header.setReorderingAllowed(True); scrollPane = JScrollPane() scrollPane.getViewport().setView((table)) # copyButton = JButton('Merge') 
#,actionPerformed=self.noAction # frame.add(copyButton) listener=MyListener(table) table.addMouseListener(listener) table.addKeyListener(listener) menubar = JMenuBar() file = JMenu("Edit") file.setMnemonic(KeyEvent.VK_E) lookup = JMenuItem("Lookup",actionPerformed=frame.LookupEvent) lookup.setMnemonic(KeyEvent.VK_L) file.add(lookup) menubar.add(file) frame.setJMenuBar(menubar) frame.add(scrollPane) frame.pack(); frame.setSize(frame.getPreferredSize()); frame.show() def isBuilding(p): v = p.get("building"); if v is not None and v != "no" and v != "entrance": return True else: return False class BuildingInBuilding : BUILDING_INSIDE_BUILDING = 2001; def __init__ (self): self.primitivesToCheck = LinkedList(); self.index = QuadBuckets(); print 'building in building' #super(tr("Building inside building"), tr("Checks for building areas inside of buildings.")); def visitn(self,n) : # print "visitn:" # print n if (n.isUsable() and isBuilding(n)) : if not self.primitivesToCheck.contains(n): # print "adding :" n self.primitivesToCheck.add(n); else: print "duplicate p :" # print n def visitw(self,w) : print "visitw:" # print w if (w.isUsable() and w.isClosed() and isBuilding(w)) : self.primitivesToCheck.add(w) self.index.add(w) print "added" def isInPolygon(n, polygon) : return Geometry.nodeInsidePolygon(n, polygon); def sameLayers( w1, w2) : if w1.get("layer") is not None : l1 = w1.get("layer") else : l1 = "0"; if w2.get("layer") is not None : l2 = w2.get("layer") else : l2 ="0"; return l1.equals(l2);
def endTest2(self): for p in self.primitivesToCheck : collection = self.index.search(p.getBBox()) for object in collection: if (not p.equals(object)): if (isinstance(p,Node)): self.evaluateNode(p, object) else : print p # else if (p instanceof Way) # return evaluateWay((Way) p, object); # else if (p instanceof Relation) # return evaluateRelation((Relation) p, object); # return false; def endTest(self) : print "end" # bbox = BBox(-180,-90,180,90) bbox = BBox(-1000,-900,1800,900) print self.index collection = self.index.search(bbox) # print collection def projection() : print "projection" pc = ProjectionPreference.mercator id = pc.getId() pref = None Main.pref.putCollection("projection.sub."+id, pref) pc.setPreferences(pref) proj = pc.getProjection() Main.setProjection(proj) def prefs() : print "prefs" Main.pref = Preferences() Main.pref.put("tags.reversed_direction", "false") class JythonWay(): def __init__(self,x): self.way=x self.subobjects=[] pass # todo: def lookup(self) : # lookup this street name on the internet print "ToDO" # todo: def merge(self) : # merge these two streets, fix the names print "ToDO fix the streets" def get (self,k): return self.way.get(k) def name(self): return self.way.get('name') def addsubobject(self, other): return self.subobjects.append(other) import re pattern = re.compile(r'\s+') def streetlist(objs) : objs2 = [] streets = {} for p in objs: if (not isinstance(p,Way)): continue s=p.get('name') hw=p.get('highway') if (s is None): continue if (hw is None): continue s = s.lower() s=re.sub(pattern, '', s) #remove whitespace if not s in streets : # print "%s is new" % s streets[s]=JythonWay(p) else : streets[s].addsubobject(p) # print streets.values() objs3= sorted(streets.values(),(lambda x, y: (cmp(x.name(), y.name())))) DisplayStreetTable(objs3) def main (): print "main" prefs(); projection(); importer = OsmImporter() fileObj= File('/home/mdupont/experiments/josm/topeka/noto.osm') # fileObj= File('/home/mdupont/experiments/josm/topeka/topeka.osm') inobj = FileInputStream(fileObj); data = importer.loadLayer(inobj, fileObj, fileObj.getName(), NullProgressMonitor.INSTANCE) s = data.toString(); # print s primitives = data.getLayer().data.allPrimitives(); # print primitives objs= primitives.toArray() #DisplayTable(obj) streetlist(objs) # make a list of the street objects main();
def evaluateNode(self,p,obj): print "te" # print p # print obj
random_line_split
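This random_line_split row masks the small evaluateNode stub; the suffix shows the interesting part, endTest2, which takes every collected building, searches the spatial index for anything overlapping its bounding box, and inspects each pair. A sketch of that candidate search, assuming a JOSM-style index whose search(bbox) returns an iterable and primitives exposing getBBox() and equals():

```python
def building_pairs(primitives, index):
    """Mirror endTest2 from the sample: for each collected building,
    fetch everything whose bounding box overlaps it from the spatial
    index and keep the pairs that are not the primitive itself."""
    pairs = []
    for p in primitives:
        for other in index.search(p.getBBox()):
            if not p.equals(other):
                pairs.append((p, other))
    return pairs
```

The per-pair test (node-in-polygon via Geometry.nodeInsidePolygon, plus the layer comparison) would then run on each returned pair, as the commented-out evaluateWay/evaluateRelation branches in the sample suggest.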
common.js
/**************************************** * 공통 상수 선언 ****************************************/ $.fn.Global = function () { this.isLoading = false; this.loadingNum = 0; this.Init(); }; $.fn.Global.prototype = { Init: function () { var G = this; }, showLoading: function (showYN, validateReservation) { var G = this; G.validateReservation = (validateReservation != undefined) ? validateReservation : false; if (showYN) { ++G.loadingNum; if (!G.isLoading) { G.isLoading = true; //if(!G.validateReservation){ //$('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:9999;"><img src="/images/viewLoading.gif" alt="데이터 로딩중입니다. 잠시만 기다려 주세요."/></div>'); // 1603313 //화면의 높이와 너비를 구한다. var maskHeight = $(document).height(); // var maskWidth = $(document).width(); var maskWidth = window.document.body.clientWidth; var mask = "<div id='mask' style='position:absolute; z-index:9000; background-color:#000000; display:none; left:0; top:0;'></div>"; var loadingImg = ''; loadingImg += "<div id='loadingImg' style='position:absolute; left:50%; top:40%; display:none; z-index:10000;'>"; loadingImg += " <img src='/images/ajax_loader_blue_64.gif'/>"; loadingImg += "</div>"; //화면에 레이어 추가 $('body') .append(mask) .append(loadingImg) //마스크의 높이와 너비를 화면 것으로 만들어 전체 화면을 채운다. $('#mask').css({ 'width' : maskWidth , 'height': maskHeight , 'opacity' : '0.3' }); //마스크 표시 $('#mask').show(); //로딩중 이미지 표시 $('#loadingImg').show(); //} //else { // $('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:100; margin-left: -90px; margin-top: -40px;"><img src="/LCHS/Image/preloader_icon.GIF" alt="결제 진행 중에 브라우져를 닫으시면 결제가 취소 됩니다. 결제가 진행중입니다. 잠시만 기다려주세요. 결제진행 시간은 시스템 환경에 다라 최대 10분가량 소요될 수 있습니다."/></div>'); //} } } else { --G.loadingNum; if (G.isLoading == true && G.loadingNum == 0) { //if ($('body').find('.d_loading')[0] != undefined) $('.d_loading').remove(); if ($('body').find('#mask')[0] != undefined) { $('#mask').remove(); $('#loadingImg').remove(); } G.isLoading = false; } } } }; var global = new $.fn.Global(); // ajax json call - 비동기 방식(기본:로딩 화면 있음) JsonCall = function (url, params, reCall, showLoading) { //params = "paramList=" + JSON.stringify(params); if (showLoading == undefined) { showLoading = true; } try { if (showLoading) global.showLoading(true); $.ajax({ type: "post", //async:false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { if (showLoading) global.showLoading(false); }, complete: function (data) { if (showLoading) global.showLoading(false); reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(로딩 화면 없음) JsonCallSync = function (url, params, reCall) { //params = "paramList=" + JSON.stringify(params); try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(데이터 바로 받음) JsonReturnDataSync = function (url, params) { //params = "paramList=" + JSON.stringify(params); var jsonVal = ""; try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + 
String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { jsonVal = data; } }); return jsonVal; } catch (e) { alert("JSON Error: " + e.message); } }; /**************************************** * 타입에 따라 링크 이동 방법이 바뀐다. * url : 링크 주소 * type : 3 :return url, 2:window.open(), 그외:windows.location ****************************************/ goUrl = function (url, type) { // type 기본값 설정 type = (type == undefined) ? '1' : type; if (type == "") { type = "1"; //1:링크 } if (type == "3") { return url; } else if (type == "2") { if (url == "") { alert(' 개발 중'); } else { window.open(url, '_blank');//새창링크 } } else { if (url == "") { alert(' 개발 중'); } else { location.href = url;//링크 } } } /************************************** * 예외 처리 **************************************/ var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다. var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다. //오류발생시 처리 //params> // e: Exception 객체 // headerText: 오류 구분용 Header Text // menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다. //return> checkException = function (e, headerText, menu) { //디버깅 모드 이면 메시지 창으로 오류 표시 if (_debug) { var sb = new StringBuilder(); sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack); alert(sb.ToString()); } else { //디버딩 모드가 아닐 때 오류 공통 처리 } //화면 이동 금지 상태 이면 return if (_notRedirection) { return; } else { //메뉴별 화면 이동 처리 switch (menu) { case "main": break; } } } // 파라미터 받아오기 // 사용법 : var param = getParameters(); function getParameters() { var searchString = window.location.search.substring(1), params = searchString.split("&"), hash = {}; if (searchString == "") return {}; for (var i = 0; i < params.length; i++) { var val = params[i].split("="); hash[unescape(val[0])] = unescape(val[1]); } return hash; } //휴대폰 번호 Array 형식으로 변환 getMemberInfoHandPhoneArray = function (phoneNum) { var hpArray = []; if (phoneNum == undefined || phoneNum == '') return hpArray; var hpLen = phoneNum.length; var hp1 = ''; var hp2 = ''; var hp3 = ''; if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 8); hp3 = phoneNum.substring(9); } else if (hpLen == 12)
때 : 010-222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 7); hp3 = phoneNum.substring(8); } else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } else if (hpLen == 10) {// 10자리 번호 일때 : 0102223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 6); hp3 = phoneNum.substring(6); } else { hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } hpArray.push(hp1); hpArray.push(hp2); hpArray.push(hp3); return hpArray; } //메인 화면으로 이동 한다. //params> //return> goToMain = function () { $(location).attr("href", "/com/main.do"); } //팝업창을 띠운다 //sUrl - 띠울 URL //sFrame - 띠울이름 //sFeature - 창 속성 openDialog = function (sUrl, sFrame, sFeature) { var op = window.open(sUrl, sFrame, sFeature); return op; } var ctrlDown = false; //숫자만 입력 Input Key Event //params> //return> //$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);"); onlyNumberInputEvent = function (event) { try { var ctrlKey = 17, vKey = 86, cKey = 67; var key = event.which || event.keyCode; // backspace:8 // tab:9 // delete:46 if (key == 8 || key == 9 || key == 46) { // 키 통과 } else { if (ctrlDown && (key == 86 || key == 67)) { } else if (key >= 48 && key <= 57) { // 숫자 확인 } else if (key >= 96 && key <= 105) { // 숫자 확인 } else { //이벤트 해제 if (event.preventDefault) { event.preventDefault(); } else { event.returnValue = false; } } } } catch (e) { } } $(function () { $.fn.ClientInfo = function () { this.channelType = "HO"; this.browserAgent = ""; this.browserVersion = ""; this.Init(); }; $.fn.ClientInfo.prototype = { Init: function () { this.browserAgent = this.BrowserAgent(); this.browserVersion = navigator.userAgent; }, BrowserAgent: function () { var _ua = navigator.userAgent; var result = ""; //IE 11,10,9,8 var trident = _ua.match(/Trident\/(\d.\d)/i); if (trident != null) { if (trident[1] == "7.0") return result = "IE" + 11; if (trident[1] == "6.0") return result = "IE" + 10; if (trident[1] == "5.0") return result = "IE" + 9; if (trident[1] == "4.0") return result = "IE" + 8; } //IE 7... 
if (navigator.appName == 'Microsoft Internet Explorer') return result = "IE" + 7; /* var re = new RegExp("MSIE ([0-9]{1,}[\.0-9]{0,})"); if(re.exec(_ua) != null) result = parseFloat(RegExp.$1); if( result == 7 ) return result = "IE" + 7; */ //other var agt = _ua.toLowerCase(); if (agt.indexOf("chrome") != -1) return result = 'Chrome'; if (agt.indexOf("opera") != -1) return result = 'Opera'; if (agt.indexOf("staroffice") != -1) return result = 'Star Office'; if (agt.indexOf("webtv") != -1) return result = 'WebTV'; if (agt.indexOf("beonex") != -1) return result = 'Beonex'; if (agt.indexOf("chimera") != -1) return result = 'Chimera'; if (agt.indexOf("netpositive") != -1) return result = 'NetPositive'; if (agt.indexOf("phoenix") != -1) return result = 'Phoenix'; if (agt.indexOf("firefox") != -1) return result = 'Firefox'; if (agt.indexOf("safari") != -1) return result = 'Safari'; if (agt.indexOf("skipstone") != -1) return result = 'SkipStone'; if (agt.indexOf("netscape") != -1) return result = 'Netscape'; if (agt.indexOf("mozilla/5.0") != -1) return result = 'Mozilla'; return result; }, Android: function () { return navigator.userAgent.match(/Android/i); }, BlackBerry: function () { return navigator.userAgent.match(/BlackBerry/i); }, iOS: function () { return navigator.userAgent.match(/iPhone|iPad|iPod/i); }, isiPad: function () { return navigator.userAgent.match(/iPad/i); }, isiPhone: function () { return navigator.userAgent.match(/iPhone|iPod/i); }, Opera: function () { return navigator.userAgent.match(/Opera Mini/i); }, Windows: function () { return navigator.userAgent.match(/IEMobile/i); }, any: function () { return (this.Android() || this.BlackBerry() || this.iOS() || this.Opera() || this.Windows()); }, Chrome: function () { return navigator.userAgent.match(/Chrome/i); }, }; }); /*********************************************************************************************** * @Description : 페이지 공통 시작 * @param : totalcount - 전체 게시물 갯수 * @param : pageno - 현재 페이지 번호 * @param : blocksize - 화면 당 리스트 갯수 * @param : pageBoxSize - 하단 링크 갯수 * @param : scriptCallName - 링크 호출 javascript 함수명 * ************************************************************************************************/ function pageDisplay(totalcount, pageno, blocksize, pageBoxSize, scriptCallName) { var sbPaging = new StringBuilder(); totalcount = (totalcount == undefined) ? '0' : parseInt(totalcount, 10); pageno = (pageno == undefined) ? '0' : parseInt(pageno, 10); blocksize = (blocksize == undefined) ? '0' : parseInt(blocksize, 10); pageBoxSize = (pageBoxSize == undefined) ? 
'0' : parseInt(pageBoxSize, 10); var currentBoxSize = Math.ceil(pageno / pageBoxSize);// 현재 page block 번호 var startPageNo = Math.ceil((currentBoxSize - 1) * pageBoxSize) + 1;// 시작할 페이지 번호 var endPageNo = currentBoxSize * pageBoxSize;// block당 마지막 페이지 번호 var totalPageNo = Math.ceil((totalcount) / blocksize);// 총 페이지 번호 //console.log(totalPageNo); //var scriptCallName = ''; //scriptCallName = 'viewListNumberSearch'; // 시작 div //sbPaging.Append('\n<ul class="pagination">'); if (pageno > 1) { // 맨 처음 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{0}(1); return false;" aria-label="Previous"><span aria-hidden="true">&laquo;</span></a></li>', scriptCallName); // 이전 pageBoxSize 만큼 이동 //if (startPageNo > 1) { sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Previous"><span aria-hidden="true">&lt;</span></a></li>', pageno - 1, scriptCallName); } // 번호 for (i = startPageNo; i <= endPageNo; i++) { if (i <= totalPageNo) { var classname = ''; if (i == pageno) { classname = ' class="active"'; } else { classname = ''; } sbPaging.AppendFormat('\n<li{2}><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;">{0}</a>', i, scriptCallName, classname); } } if (totalPageNo > 1 && pageno < totalPageNo) { // 다음 pageBoxSize 만큼 이동 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&gt;</span></a></li>', pageno + 1, scriptCallName); // 마지막 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&raquo;</span></a></li>', totalPageNo, scriptCallName); } $('.pagination').html(sbPaging.ToString()); }; //=================================== /** * @Content 다운로드 * @param * fileFullPath : 파일 패스 * fileName : 파일명 * @return **/ goFileDownload = function (fileFullPath, fileName) { try { var param = "?fileFullPath=" + fileFullPath + "&fileName=" + fileName; var url = CinemaServerDomain + "/LCHS/Contents/Common/download.aspx" + param; goUrl(url, '1'); } catch (e) { checkException(e, "goFileDownload", NOW_MENU_TYPE); //오류 발생 처리 } } //=================================== /** * @Content 이전 페이지로 이동 * @param * @return **/ locationBack = function () { history.back(-1); return; }
{// 12자리 번호 일
identifier_name
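The common.js rows follow the same layout; here the masked span falls inside getMemberInfoHandPhoneArray, which splits a Korean mobile number into three parts by its formatted length (13 or 12 characters with hyphens, 11 or 10 without). A Python sketch of that length-based split, kept in the same language as the sketches above rather than the sample's JavaScript:

```python
def split_phone(num):
    """Split a mobile number into [prefix, middle, last], mirroring
    getMemberInfoHandPhoneArray: 13/12 chars are the hyphenated
    forms, 11/10 chars are the digit-only forms."""
    if not num:
        return []
    n = len(num)
    if n == 13:            # 010-2222-3333
        parts = num[0:3], num[4:8], num[9:]
    elif n == 12:          # 010-222-3333
        parts = num[0:3], num[4:7], num[8:]
    elif n == 11:          # 01022223333
        parts = num[0:3], num[3:7], num[7:]
    elif n == 10:          # 0102223333
        parts = num[0:3], num[3:6], num[6:]
    else:
        parts = num[0:3], num[3:7], num[7:]
    return list(parts)

assert split_phone("010-2222-3333") == ["010", "2222", "3333"]
assert split_phone("0102223333") == ["010", "222", "3333"]
```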
common.js
/**************************************** * 공통 상수 선언 ****************************************/ $.fn.Global = function () { this.isLoading = false; this.loadingNum = 0; this.Init(); }; $.fn.Global.prototype = { Init: function () { var G = this; }, showLoading: function (showYN, validateReservation) { var G = this; G.validateReservation = (validateReservation != undefined) ? validateReservation : false; if (showYN) { ++G.loadingNum; if (!G.isLoading) { G.isLoading = true; //if(!G.validateReservation){ //$('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:9999;"><img src="/images/viewLoading.gif" alt="데이터 로딩중입니다. 잠시만 기다려 주세요."/></div>'); // 1603313 //화면의 높이와 너비를 구한다. var maskHeight = $(document).height(); // var maskWidth = $(document).width(); var maskWidth = window.document.body.clientWidth; var mask = "<div id='mask' style='position:absolute; z-index:9000; background-color:#000000; display:none; left:0; top:0;'></div>"; var loadingImg = ''; loadingImg += "<div id='loadingImg' style='position:absolute; left:50%; top:40%; display:none; z-index:10000;'>"; loadingImg += " <img src='/images/ajax_loader_blue_64.gif'/>"; loadingImg += "</div>"; //화면에 레이어 추가 $('body') .append(mask) .append(loadingImg) //마스크의 높이와 너비를 화면 것으로 만들어 전체 화면을 채운다. $('#mask').css({ 'width' : maskWidth , 'height': maskHeight , 'opacity' : '0.3' }); //마스크 표시 $('#mask').show(); //로딩중 이미지 표시 $('#loadingImg').show(); //} //else { // $('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:100; margin-left: -90px; margin-top: -40px;"><img src="/LCHS/Image/preloader_icon.GIF" alt="결제 진행 중에 브라우져를 닫으시면 결제가 취소 됩니다. 결제가 진행중입니다. 잠시만 기다려주세요. 결제진행 시간은 시스템 환경에 다라 최대 10분가량 소요될 수 있습니다."/></div>'); //} } } else { --G.loadingNum; if (G.isLoading == true && G.loadingNum == 0) { //if ($('body').find('.d_loading')[0] != undefined) $('.d_loading').remove(); if ($('body').find('#mask')[0] != undefined) { $('#mask').remove(); $('#loadingImg').remove(); } G.isLoading = false; } } } }; var global = new $.fn.Global(); // ajax json call - 비동기 방식(기본:로딩 화면 있음) JsonCall = function (url, params, reCall, showLoading) { //params = "paramList=" + JSON.stringify(params); if (showLoading == undefined) { showLoading = true; } try { if (showLoading) global.showLoading(true); $.ajax({ type: "post", //async:false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { if (showLoading) global.showLoading(false); }, complete: function (data) { if (showLoading) global.showLoading(false); reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); }
//ajax json call - 동기 방식(로딩 화면 없음) JsonCallSync = function (url, params, reCall) { //params = "paramList=" + JSON.stringify(params); try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(데이터 바로 받음) JsonReturnDataSync = function (url, params) { //params = "paramList=" + JSON.stringify(params); var jsonVal = ""; try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { jsonVal = data; } }); return jsonVal; } catch (e) { alert("JSON Error: " + e.message); } }; /**************************************** * 타입에 따라 링크 이동 방법이 바뀐다. * url : 링크 주소 * type : 3 :return url, 2:window.open(), 그외:windows.location ****************************************/ goUrl = function (url, type) { // type 기본값 설정 type = (type == undefined) ? '1' : type; if (type == "") { type = "1"; //1:링크 } if (type == "3") { return url; } else if (type == "2") { if (url == "") { alert(' 개발 중'); } else { window.open(url, '_blank');//새창링크 } } else { if (url == "") { alert(' 개발 중'); } else { location.href = url;//링크 } } } /************************************** * 예외 처리 **************************************/ var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다. var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다. //오류발생시 처리 //params> // e: Exception 객체 // headerText: 오류 구분용 Header Text // menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다. 
//return> checkException = function (e, headerText, menu) { //디버깅 모드 이면 메시지 창으로 오류 표시 if (_debug) { var sb = new StringBuilder(); sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack); alert(sb.ToString()); } else { //디버딩 모드가 아닐 때 오류 공통 처리 } //화면 이동 금지 상태 이면 return if (_notRedirection) { return; } else { //메뉴별 화면 이동 처리 switch (menu) { case "main": break; } } } // 파라미터 받아오기 // 사용법 : var param = getParameters(); function getParameters() { var searchString = window.location.search.substring(1), params = searchString.split("&"), hash = {}; if (searchString == "") return {}; for (var i = 0; i < params.length; i++) { var val = params[i].split("="); hash[unescape(val[0])] = unescape(val[1]); } return hash; } //휴대폰 번호 Array 형식으로 변환 getMemberInfoHandPhoneArray = function (phoneNum) { var hpArray = []; if (phoneNum == undefined || phoneNum == '') return hpArray; var hpLen = phoneNum.length; var hp1 = ''; var hp2 = ''; var hp3 = ''; if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 8); hp3 = phoneNum.substring(9); } else if (hpLen == 12) {// 12자리 번호 일때 : 010-222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 7); hp3 = phoneNum.substring(8); } else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } else if (hpLen == 10) {// 10자리 번호 일때 : 0102223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 6); hp3 = phoneNum.substring(6); } else { hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } hpArray.push(hp1); hpArray.push(hp2); hpArray.push(hp3); return hpArray; } //메인 화면으로 이동 한다. //params> //return> goToMain = function () { $(location).attr("href", "/com/main.do"); } //팝업창을 띠운다 //sUrl - 띠울 URL //sFrame - 띠울이름 //sFeature - 창 속성 openDialog = function (sUrl, sFrame, sFeature) { var op = window.open(sUrl, sFrame, sFeature); return op; } var ctrlDown = false; //숫자만 입력 Input Key Event //params> //return> //$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);"); onlyNumberInputEvent = function (event) { try { var ctrlKey = 17, vKey = 86, cKey = 67; var key = event.which || event.keyCode; // backspace:8 // tab:9 // delete:46 if (key == 8 || key == 9 || key == 46) { // 키 통과 } else { if (ctrlDown && (key == 86 || key == 67)) { } else if (key >= 48 && key <= 57) { // 숫자 확인 } else if (key >= 96 && key <= 105) { // 숫자 확인 } else { //이벤트 해제 if (event.preventDefault) { event.preventDefault(); } else { event.returnValue = false; } } } } catch (e) { } } $(function () { $.fn.ClientInfo = function () { this.channelType = "HO"; this.browserAgent = ""; this.browserVersion = ""; this.Init(); }; $.fn.ClientInfo.prototype = { Init: function () { this.browserAgent = this.BrowserAgent(); this.browserVersion = navigator.userAgent; }, BrowserAgent: function () { var _ua = navigator.userAgent; var result = ""; //IE 11,10,9,8 var trident = _ua.match(/Trident\/(\d.\d)/i); if (trident != null) { if (trident[1] == "7.0") return result = "IE" + 11; if (trident[1] == "6.0") return result = "IE" + 10; if (trident[1] == "5.0") return result = "IE" + 9; if (trident[1] == "4.0") return result = "IE" + 8; } //IE 7... 
if (navigator.appName == 'Microsoft Internet Explorer') return result = "IE" + 7; /* var re = new RegExp("MSIE ([0-9]{1,}[\.0-9]{0,})"); if(re.exec(_ua) != null) result = parseFloat(RegExp.$1); if( result == 7 ) return result = "IE" + 7; */ //other var agt = _ua.toLowerCase(); if (agt.indexOf("chrome") != -1) return result = 'Chrome'; if (agt.indexOf("opera") != -1) return result = 'Opera'; if (agt.indexOf("staroffice") != -1) return result = 'Star Office'; if (agt.indexOf("webtv") != -1) return result = 'WebTV'; if (agt.indexOf("beonex") != -1) return result = 'Beonex'; if (agt.indexOf("chimera") != -1) return result = 'Chimera'; if (agt.indexOf("netpositive") != -1) return result = 'NetPositive'; if (agt.indexOf("phoenix") != -1) return result = 'Phoenix'; if (agt.indexOf("firefox") != -1) return result = 'Firefox'; if (agt.indexOf("safari") != -1) return result = 'Safari'; if (agt.indexOf("skipstone") != -1) return result = 'SkipStone'; if (agt.indexOf("netscape") != -1) return result = 'Netscape'; if (agt.indexOf("mozilla/5.0") != -1) return result = 'Mozilla'; return result; }, Android: function () { return navigator.userAgent.match(/Android/i); }, BlackBerry: function () { return navigator.userAgent.match(/BlackBerry/i); }, iOS: function () { return navigator.userAgent.match(/iPhone|iPad|iPod/i); }, isiPad: function () { return navigator.userAgent.match(/iPad/i); }, isiPhone: function () { return navigator.userAgent.match(/iPhone|iPod/i); }, Opera: function () { return navigator.userAgent.match(/Opera Mini/i); }, Windows: function () { return navigator.userAgent.match(/IEMobile/i); }, any: function () { return (this.Android() || this.BlackBerry() || this.iOS() || this.Opera() || this.Windows()); }, Chrome: function () { return navigator.userAgent.match(/Chrome/i); }, }; }); /*********************************************************************************************** * @Description : 페이지 공통 시작 * @param : totalcount - 전체 게시물 갯수 * @param : pageno - 현재 페이지 번호 * @param : blocksize - 화면 당 리스트 갯수 * @param : pageBoxSize - 하단 링크 갯수 * @param : scriptCallName - 링크 호출 javascript 함수명 * ************************************************************************************************/ function pageDisplay(totalcount, pageno, blocksize, pageBoxSize, scriptCallName) { var sbPaging = new StringBuilder(); totalcount = (totalcount == undefined) ? '0' : parseInt(totalcount, 10); pageno = (pageno == undefined) ? '0' : parseInt(pageno, 10); blocksize = (blocksize == undefined) ? '0' : parseInt(blocksize, 10); pageBoxSize = (pageBoxSize == undefined) ? 
'0' : parseInt(pageBoxSize, 10); var currentBoxSize = Math.ceil(pageno / pageBoxSize);// 현재 page block 번호 var startPageNo = Math.ceil((currentBoxSize - 1) * pageBoxSize) + 1;// 시작할 페이지 번호 var endPageNo = currentBoxSize * pageBoxSize;// block당 마지막 페이지 번호 var totalPageNo = Math.ceil((totalcount) / blocksize);// 총 페이지 번호 //console.log(totalPageNo); //var scriptCallName = ''; //scriptCallName = 'viewListNumberSearch'; // 시작 div //sbPaging.Append('\n<ul class="pagination">'); if (pageno > 1) { // 맨 처음 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{0}(1); return false;" aria-label="Previous"><span aria-hidden="true">&laquo;</span></a></li>', scriptCallName); // 이전 pageBoxSize 만큼 이동 //if (startPageNo > 1) { sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Previous"><span aria-hidden="true">&lt;</span></a></li>', pageno - 1, scriptCallName); } // 번호 for (i = startPageNo; i <= endPageNo; i++) { if (i <= totalPageNo) { var classname = ''; if (i == pageno) { classname = ' class="active"'; } else { classname = ''; } sbPaging.AppendFormat('\n<li{2}><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;">{0}</a>', i, scriptCallName, classname); } } if (totalPageNo > 1 && pageno < totalPageNo) { // 다음 pageBoxSize 만큼 이동 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&gt;</span></a></li>', pageno + 1, scriptCallName); // 마지막 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&raquo;</span></a></li>', totalPageNo, scriptCallName); } $('.pagination').html(sbPaging.ToString()); }; //=================================== /** * @Content 다운로드 * @param * fileFullPath : 파일 패스 * fileName : 파일명 * @return **/ goFileDownload = function (fileFullPath, fileName) { try { var param = "?fileFullPath=" + fileFullPath + "&fileName=" + fileName; var url = CinemaServerDomain + "/LCHS/Contents/Common/download.aspx" + param; goUrl(url, '1'); } catch (e) { checkException(e, "goFileDownload", NOW_MENU_TYPE); //오류 발생 처리 } } //=================================== /** * @Content 이전 페이지로 이동 * @param * @return **/ locationBack = function () { history.back(-1); return; }
};
random_line_split
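The Global.showLoading helper above keeps a reference count (loadingNum) so the mask and spinner are created only when the first pending request starts and removed only when the last one finishes. Below is a minimal sketch of that bookkeeping re-expressed in Rust for illustration; the LoadingState type and its method names are assumptions, not part of common.js, and the actual DOM work is left to the caller.

// Reference-counted loading state: the overlay is only shown on the
// 0 -> 1 transition and only hidden on the 1 -> 0 transition.
struct LoadingState {
    is_loading: bool,
    loading_num: i32,
}

impl LoadingState {
    fn new() -> Self {
        Self { is_loading: false, loading_num: 0 }
    }

    // Returns true when the caller should actually draw the overlay.
    fn show(&mut self) -> bool {
        self.loading_num += 1;
        if !self.is_loading {
            self.is_loading = true;
            return true;
        }
        false
    }

    // Returns true when the caller should actually remove the overlay.
    fn hide(&mut self) -> bool {
        self.loading_num -= 1;
        if self.is_loading && self.loading_num == 0 {
            self.is_loading = false;
            return true;
        }
        false
    }
}

fn main() {
    let mut state = LoadingState::new();
    assert!(state.show());  // first request: draw mask and spinner
    assert!(!state.show()); // second request: overlay is already up
    assert!(!state.hide()); // one request still pending, keep it
    assert!(state.hide());  // last request finished: tear it down
}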
common.js
/**************************************** * 공통 상수 선언 ****************************************/ $.fn.Global = function () { this.isLoading = false; this.loadingNum = 0; this.Init(); }; $.fn.Global.prototype = { Init: function () { var G = this; }, showLoading: function (showYN, validateReservation) { var G = this; G.validateReservation = (validateReservation != undefined) ? validateReservation : false; if (showYN) { ++G.loadingNum; if (!G.isLoading) { G.isLoading = true; //if(!G.validateReservation){ //$('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:9999;"><img src="/images/viewLoading.gif" alt="데이터 로딩중입니다. 잠시만 기다려 주세요."/></div>'); // 1603313 //화면의 높이와 너비를 구한다. var maskHeight = $(document).height(); // var maskWidth = $(document).width(); var maskWidth = window.document.body.clientWidth; var mask = "<div id='mask' style='position:absolute; z-index:9000; background-color:#000000; display:none; left:0; top:0;'></div>"; var loadingImg = ''; loadingImg += "<div id='loadingImg' style='position:absolute; left:50%; top:40%; display:none; z-index:10000;'>"; loadingImg += " <img src='/images/ajax_loader_blue_64.gif'/>"; loadingImg += "</div>"; //화면에 레이어 추가 $('body') .append(mask) .append(loadingImg) //마스크의 높이와 너비를 화면 것으로 만들어 전체 화면을 채운다. $('#mask').css({ 'width' : maskWidth , 'height': maskHeight , 'opacity' : '0.3' }); //마스크 표시 $('#mask').show(); //로딩중 이미지 표시 $('#loadingImg').show(); //} //else { // $('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:100; margin-left: -90px; margin-top: -40px;"><img src="/LCHS/Image/preloader_icon.GIF" alt="결제 진행 중에 브라우져를 닫으시면 결제가 취소 됩니다. 결제가 진행중입니다. 잠시만 기다려주세요. 결제진행 시간은 시스템 환경에 다라 최대 10분가량 소요될 수 있습니다."/></div>'); //} } } else { --G.loadingNum; if (G.isLoading == true && G.loadingNum == 0) { //if ($('body').find('.d_loading')[0] != undefined) $('.d_loading').remove(); if ($('body').find('#mask')[0] != undefined) { $('#mask').remove(); $('#loadingImg').remove(); } G.isLoading = false; } } } }; var global = new $.fn.Global(); // ajax json call - 비동기 방식(기본:로딩 화면 있음) JsonCall = function (url, params, reCall, showLoading) { //params = "paramList=" + JSON.stringify(params); if (showLoading == undefined) { showLoading = true; } try { if (showLoading) global.showLoading(true); $.ajax({ type: "post", //async:false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { if (showLoading) global.showLoading(false); }, complete: function (data) { if (showLoading) global.showLoading(false); reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(로딩 화면 없음) JsonCallSync = function (url, params, reCall) { //params = "paramList=" + JSON.stringify(params); try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(데이터 바로 받음) JsonReturnDataSync = function (url, params) { //params = "paramList=" + JSON.stringify(params); var jsonVal = ""; try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + 
String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { jsonVal = data; } }); return jsonVal; } catch (e) { alert("JSON Error: " + e.message); } }; /**************************************** * 타입에 따라 링크 이동 방법이 바뀐다. * url : 링크 주소 * type : 3 :return url, 2:window.open(), 그외:windows.location ****************************************/ goUrl = function (url, type) { // type 기본값 설정 type = (type == undefined) ? '1' : type; if (type == "") { type = "1"; //1:링크 } if (type == "3") { return url; } else if (type == "2") { if (url == "") { alert(' 개발 중'); } else { window.open(url, '_blank');//새창링크 } } else { if (url == "") { alert(' 개발 중'); } else { location.href = url;//링크 } } } /************************************** * 예외 처리 **************************************/ var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다. var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다. //오류발생시 처리 //params> // e: Exception 객체 // headerText: 오류 구분용 Header Text // menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다. //return> checkException = function (e, headerText, menu) { //디버깅 모드 이면 메시지 창으로 오류 표시 if (_debug) { var sb = new StringBuilder(); sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack); alert(sb.ToString()); } else { //디버딩 모드가 아닐 때 오류 공통 처리 } //화면 이동 금지 상태 이면 return if (_notRedirection) { return; } else { //메뉴별 화면 이동 처리 switch (menu) { case "main": break; } } } // 파라미터 받아오기 // 사용법 : var param = getParameters(); function getParameters() { var searchString = window.location.search.substring(1), params = searchString.split("&"), hash = {}; if (searchString == "") return {}; for (var i = 0; i < params.length; i++) { var val = params[i].split("="); hash[unescape(val[0])] = unescape(val[1]); } return hash; } //휴대폰 번호 Array 형식으로 변환 getMemberInfoHandPhoneArray = function (phoneNum) { var hpArray = []; if (phoneNum == undefined || phoneNum == '') return hpArray; var hpLen = phoneNum.length; var hp1 = ''; var hp2 = ''; var hp3 = ''; if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 8); hp3 = phoneNum.substring(9); } else if (hpLen == 12) {// 12자리 번호 일때 :
pLen == 10) {// 10자리 번호 일때 : 0102223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 6); hp3 = phoneNum.substring(6); } else { hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } hpArray.push(hp1); hpArray.push(hp2); hpArray.push(hp3); return hpArray; } //메인 화면으로 이동 한다. //params> //return> goToMain = function () { $(location).attr("href", "/com/main.do"); } //팝업창을 띠운다 //sUrl - 띠울 URL //sFrame - 띠울이름 //sFeature - 창 속성 openDialog = function (sUrl, sFrame, sFeature) { var op = window.open(sUrl, sFrame, sFeature); return op; } var ctrlDown = false; //숫자만 입력 Input Key Event //params> //return> //$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);"); onlyNumberInputEvent = function (event) { try { var ctrlKey = 17, vKey = 86, cKey = 67; var key = event.which || event.keyCode; // backspace:8 // tab:9 // delete:46 if (key == 8 || key == 9 || key == 46) { // 키 통과 } else { if (ctrlDown && (key == 86 || key == 67)) { } else if (key >= 48 && key <= 57) { // 숫자 확인 } else if (key >= 96 && key <= 105) { // 숫자 확인 } else { //이벤트 해제 if (event.preventDefault) { event.preventDefault(); } else { event.returnValue = false; } } } } catch (e) { } } $(function () { $.fn.ClientInfo = function () { this.channelType = "HO"; this.browserAgent = ""; this.browserVersion = ""; this.Init(); }; $.fn.ClientInfo.prototype = { Init: function () { this.browserAgent = this.BrowserAgent(); this.browserVersion = navigator.userAgent; }, BrowserAgent: function () { var _ua = navigator.userAgent; var result = ""; //IE 11,10,9,8 var trident = _ua.match(/Trident\/(\d.\d)/i); if (trident != null) { if (trident[1] == "7.0") return result = "IE" + 11; if (trident[1] == "6.0") return result = "IE" + 10; if (trident[1] == "5.0") return result = "IE" + 9; if (trident[1] == "4.0") return result = "IE" + 8; } //IE 7... 
if (navigator.appName == 'Microsoft Internet Explorer') return result = "IE" + 7; /* var re = new RegExp("MSIE ([0-9]{1,}[\.0-9]{0,})"); if(re.exec(_ua) != null) result = parseFloat(RegExp.$1); if( result == 7 ) return result = "IE" + 7; */ //other var agt = _ua.toLowerCase(); if (agt.indexOf("chrome") != -1) return result = 'Chrome'; if (agt.indexOf("opera") != -1) return result = 'Opera'; if (agt.indexOf("staroffice") != -1) return result = 'Star Office'; if (agt.indexOf("webtv") != -1) return result = 'WebTV'; if (agt.indexOf("beonex") != -1) return result = 'Beonex'; if (agt.indexOf("chimera") != -1) return result = 'Chimera'; if (agt.indexOf("netpositive") != -1) return result = 'NetPositive'; if (agt.indexOf("phoenix") != -1) return result = 'Phoenix'; if (agt.indexOf("firefox") != -1) return result = 'Firefox'; if (agt.indexOf("safari") != -1) return result = 'Safari'; if (agt.indexOf("skipstone") != -1) return result = 'SkipStone'; if (agt.indexOf("netscape") != -1) return result = 'Netscape'; if (agt.indexOf("mozilla/5.0") != -1) return result = 'Mozilla'; return result; }, Android: function () { return navigator.userAgent.match(/Android/i); }, BlackBerry: function () { return navigator.userAgent.match(/BlackBerry/i); }, iOS: function () { return navigator.userAgent.match(/iPhone|iPad|iPod/i); }, isiPad: function () { return navigator.userAgent.match(/iPad/i); }, isiPhone: function () { return navigator.userAgent.match(/iPhone|iPod/i); }, Opera: function () { return navigator.userAgent.match(/Opera Mini/i); }, Windows: function () { return navigator.userAgent.match(/IEMobile/i); }, any: function () { return (this.Android() || this.BlackBerry() || this.iOS() || this.Opera() || this.Windows()); }, Chrome: function () { return navigator.userAgent.match(/Chrome/i); }, }; }); /*********************************************************************************************** * @Description : 페이지 공통 시작 * @param : totalcount - 전체 게시물 갯수 * @param : pageno - 현재 페이지 번호 * @param : blocksize - 화면 당 리스트 갯수 * @param : pageBoxSize - 하단 링크 갯수 * @param : scriptCallName - 링크 호출 javascript 함수명 * ************************************************************************************************/ function pageDisplay(totalcount, pageno, blocksize, pageBoxSize, scriptCallName) { var sbPaging = new StringBuilder(); totalcount = (totalcount == undefined) ? '0' : parseInt(totalcount, 10); pageno = (pageno == undefined) ? '0' : parseInt(pageno, 10); blocksize = (blocksize == undefined) ? '0' : parseInt(blocksize, 10); pageBoxSize = (pageBoxSize == undefined) ? 
'0' : parseInt(pageBoxSize, 10); var currentBoxSize = Math.ceil(pageno / pageBoxSize);// 현재 page block 번호 var startPageNo = Math.ceil((currentBoxSize - 1) * pageBoxSize) + 1;// 시작할 페이지 번호 var endPageNo = currentBoxSize * pageBoxSize;// block당 마지막 페이지 번호 var totalPageNo = Math.ceil((totalcount) / blocksize);// 총 페이지 번호 //console.log(totalPageNo); //var scriptCallName = ''; //scriptCallName = 'viewListNumberSearch'; // 시작 div //sbPaging.Append('\n<ul class="pagination">'); if (pageno > 1) { // 맨 처음 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{0}(1); return false;" aria-label="Previous"><span aria-hidden="true">&laquo;</span></a></li>', scriptCallName); // 이전 pageBoxSize 만큼 이동 //if (startPageNo > 1) { sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Previous"><span aria-hidden="true">&lt;</span></a></li>', pageno - 1, scriptCallName); } // 번호 for (i = startPageNo; i <= endPageNo; i++) { if (i <= totalPageNo) { var classname = ''; if (i == pageno) { classname = ' class="active"'; } else { classname = ''; } sbPaging.AppendFormat('\n<li{2}><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;">{0}</a>', i, scriptCallName, classname); } } if (totalPageNo > 1 && pageno < totalPageNo) { // 다음 pageBoxSize 만큼 이동 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&gt;</span></a></li>', pageno + 1, scriptCallName); // 마지막 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&raquo;</span></a></li>', totalPageNo, scriptCallName); } $('.pagination').html(sbPaging.ToString()); }; //=================================== /** * @Content 다운로드 * @param * fileFullPath : 파일 패스 * fileName : 파일명 * @return **/ goFileDownload = function (fileFullPath, fileName) { try { var param = "?fileFullPath=" + fileFullPath + "&fileName=" + fileName; var url = CinemaServerDomain + "/LCHS/Contents/Common/download.aspx" + param; goUrl(url, '1'); } catch (e) { checkException(e, "goFileDownload", NOW_MENU_TYPE); //오류 발생 처리 } } //=================================== /** * @Content 이전 페이지로 이동 * @param * @return **/ locationBack = function () { history.back(-1); return; }
010-222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 7); hp3 = phoneNum.substring(8); } else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } else if (h
identifier_body
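getMemberInfoHandPhoneArray above splits a mobile number into three parts purely by string length: 13 and 12 characters for the hyphenated forms, 11 and 10 for the digit-only forms, plus a fallback. Below is a minimal sketch of those split rules, assuming ASCII input that is long enough for the default slices (the original JS clamps via substring); the split_hand_phone name is hypothetical and not part of common.js.

// Split a Korean mobile number into its three groups based on length alone.
fn split_hand_phone(phone: &str) -> Vec<String> {
    if phone.is_empty() {
        return Vec::new();
    }
    let (a, b, c) = match phone.len() {
        13 => (&phone[0..3], &phone[4..8], &phone[9..]), // 010-2222-3333
        12 => (&phone[0..3], &phone[4..7], &phone[8..]), // 010-222-3333
        11 => (&phone[0..3], &phone[3..7], &phone[7..]), // 01022223333
        10 => (&phone[0..3], &phone[3..6], &phone[6..]), // 0102223333
        _ => (&phone[0..3], &phone[3..7], &phone[7..]),  // fallback, as in the JS default branch
    };
    vec![a.to_string(), b.to_string(), c.to_string()]
}

fn main() {
    assert_eq!(split_hand_phone("010-2222-3333"), vec!["010", "2222", "3333"]);
    assert_eq!(split_hand_phone("01022223333"), vec!["010", "2222", "3333"]);
    assert!(split_hand_phone("").is_empty());
}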
common.js
/**************************************** * 공통 상수 선언 ****************************************/ $.fn.Global = function () { this.isLoading = false; this.loadingNum = 0; this.Init(); }; $.fn.Global.prototype = { Init: function () { var G = this; }, showLoading: function (showYN, validateReservation) { var G = this; G.validateReservation = (validateReservation != undefined) ? validateReservation : false; if (showYN) { ++G.loadingNum; if (!G.isLoading) { G.isLoading = true; //if(!G.validateReservation){ //$('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:9999;"><img src="/images/viewLoading.gif" alt="데이터 로딩중입니다. 잠시만 기다려 주세요."/></div>'); // 1603313 //화면의 높이와 너비를 구한다. var maskHeight = $(document).height(); // var maskWidth = $(document).width(); var maskWidth = window.document.body.clientWidth; var mask = "<div id='mask' style='position:absolute; z-index:9000; background-color:#000000; display:none; left:0; top:0;'></div>"; var loadingImg = ''; loadingImg += "<div id='loadingImg' style='position:absolute; left:50%; top:40%; display:none; z-index:10000;'>"; loadingImg += " <img src='/images/ajax_loader_blue_64.gif'/>"; loadingImg += "</div>"; //화면에 레이어 추가 $('body') .append(mask) .append(loadingImg) //마스크의 높이와 너비를 화면 것으로 만들어 전체 화면을 채운다. $('#mask').css({ 'width' : maskWidth , 'height': maskHeight , 'opacity' : '0.3' }); //마스크 표시 $('#mask').show(); //로딩중 이미지 표시 $('#loadingImg').show(); //} //else { // $('body').append('<div class="d_loading" style="position:fixed; top:50%; left:50%; z-index:100; margin-left: -90px; margin-top: -40px;"><img src="/LCHS/Image/preloader_icon.GIF" alt="결제 진행 중에 브라우져를 닫으시면 결제가 취소 됩니다. 결제가 진행중입니다. 잠시만 기다려주세요. 결제진행 시간은 시스템 환경에 다라 최대 10분가량 소요될 수 있습니다."/></div>'); //} } } else { --G.loadingNum; if (G.isLoading == true && G.loadingNum == 0) { //if ($('body').find('.d_loading')[0] != undefined) $('.d_loading').remove(); if ($('body').find('#mask')[0] != undefined) { $('#mask').remove(); $('#loadingImg').remove(); } G.isLoading = false; } } } }; var global = new $.fn.Global(); // ajax json call - 비동기 방식(기본:로딩 화면 있음) JsonCall = function (url, params, reCall, showLoading) { //params = "paramList=" + JSON.stringify(params); if (showLoading == undefined) { showLoading = true; } try { if (showLoading) global.showLoading(true); $.ajax({ type: "post", //async:false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { if (showLoading) global.showLoading(false); }, complete: function (data) { if (showLoading) global.showLoading(false); reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(로딩 화면 없음) JsonCallSync = function (url, params, reCall) { //params = "paramList=" + JSON.stringify(params); try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { reCall(data); } }); } catch (e) { alert("JSON Error: " + e.message); } }; //ajax json call - 동기 방식(데이터 바로 받음) JsonReturnDataSync = function (url, params) { //params = "paramList=" + JSON.stringify(params); var jsonVal = ""; try { $.ajax({ type: "post", async: false, url: url + "?nocashe=" + 
String(Math.random()), //dataType: "json", data: params, contentType: "application/x-www-form-urlencoded; charset=UTF-8", beforeSend: function (xhr) { xhr.setRequestHeader("Content-Encoding", "gzip"); }, fail: function (data) { }, complete: function (data) { jsonVal = data; } }); return jsonVal; } catch (e) { alert("JSON Error: " + e.message); } }; /**************************************** * 타입에 따라 링크 이동 방법이 바뀐다. * url : 링크 주소 * type : 3 :return url, 2:window.open(), 그외:windows.location ****************************************/ goUrl = function (url, type) { // type 기본값 설정 type = (type == undefined) ? '1' : type; if (type == "") { type = "1"; //1:링크 } if (type == "3") { return url; } else if (type == "2") { if (url == "") { alert(' 개발 중'); } else { window.open(url, '_blank');//새창링크 } } else { if (url == "") { alert(' 개발 중'); } else { location.href = url;//링크 } } } /************************************** * 예외 처리 **************************************/ var _debug; //디버깅 모드 설정용, 각 페이지에서 true로 설정하면, 오류를 alert 창으로 표시한다. var _notRedirection; //페이지 이동 금지 설정용, 각 페이지에서 true로 설정하면, 페이지 이동하지 않는다. //오류발생시 처리 //params> // e: Exception 객체 // headerText: 오류 구분용 Header Text // menu: 메뉴 구분자, 페이지 이동 등의 메뉴별 오류 처리용 구분자, null || "" 일 때 이동하지 않는다. //return> checkException = function (e, headerText, menu) { //디버깅 모드 이면 메시지 창으로 오류 표시 if (_debug) { var sb = new StringBuilder(); sb.AppendFormat("* Error [ {0} ]\n\n* Message: {1}\n\n* Stack:\n{2}", headerText, e.message, e.stack); alert(sb.ToString()); } else { //디버딩 모드가 아닐 때 오류 공통 처리 } //화면 이동 금지 상태 이면 return if (_notRedirection) { return; } else { //메뉴별 화면 이동 처리 switch (menu) { case "main": break; } } } // 파라미터 받아오기 // 사용법 : var param = getParameters(); function getParameters() { var searchString = window.location.search.substring(1), params = searchString.split("&"), hash = {}; if (searchString == "") return {}; for (var i = 0; i < params.length; i++) { var val = params[i].split("="); hash[unescape(val[0])] = unescape(val[1]); } return hash; } //휴대폰 번호 Array 형식으로 변환 getMemberInfoHandPhoneArray = function (phoneNum) { var hpArray = []; if (phoneNum == undefined || phoneNum == '') return hpArray; var hpLen = phoneNum.length; var hp1 = ''; var hp2 = ''; var hp3 = ''; if (hpLen == 13) {// 13자리 번호 일때 : 010-2222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 8); hp3 = phoneNum.substring(9); } else if (hpLen == 12) {// 12자리 번호 일때 : 010-222-3333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(4, 7); hp3 = phoneNum.substring(8); } else if (hpLen == 11) {// 11자리 번호 일때 : 01022223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } else if (hpLen == 10) {// 10자리 번호 일때 : 0102223333 hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 6); hp3 = phoneNum.substring(6); } else { hp1 = phoneNum.substring(0, 3); hp2 = phoneNum.substring(3, 7); hp3 = phoneNum.substring(7); } hpArray
"href", "/com/main.do"); } //팝업창을 띠운다 //sUrl - 띠울 URL //sFrame - 띠울이름 //sFeature - 창 속성 openDialog = function (sUrl, sFrame, sFeature) { var op = window.open(sUrl, sFrame, sFeature); return op; } var ctrlDown = false; //숫자만 입력 Input Key Event //params> //return> //$("#userName").attr("onkeydown", "onlyNumberInputEvent(event);"); onlyNumberInputEvent = function (event) { try { var ctrlKey = 17, vKey = 86, cKey = 67; var key = event.which || event.keyCode; // backspace:8 // tab:9 // delete:46 if (key == 8 || key == 9 || key == 46) { // 키 통과 } else { if (ctrlDown && (key == 86 || key == 67)) { } else if (key >= 48 && key <= 57) { // 숫자 확인 } else if (key >= 96 && key <= 105) { // 숫자 확인 } else { //이벤트 해제 if (event.preventDefault) { event.preventDefault(); } else { event.returnValue = false; } } } } catch (e) { } } $(function () { $.fn.ClientInfo = function () { this.channelType = "HO"; this.browserAgent = ""; this.browserVersion = ""; this.Init(); }; $.fn.ClientInfo.prototype = { Init: function () { this.browserAgent = this.BrowserAgent(); this.browserVersion = navigator.userAgent; }, BrowserAgent: function () { var _ua = navigator.userAgent; var result = ""; //IE 11,10,9,8 var trident = _ua.match(/Trident\/(\d.\d)/i); if (trident != null) { if (trident[1] == "7.0") return result = "IE" + 11; if (trident[1] == "6.0") return result = "IE" + 10; if (trident[1] == "5.0") return result = "IE" + 9; if (trident[1] == "4.0") return result = "IE" + 8; } //IE 7... if (navigator.appName == 'Microsoft Internet Explorer') return result = "IE" + 7; /* var re = new RegExp("MSIE ([0-9]{1,}[\.0-9]{0,})"); if(re.exec(_ua) != null) result = parseFloat(RegExp.$1); if( result == 7 ) return result = "IE" + 7; */ //other var agt = _ua.toLowerCase(); if (agt.indexOf("chrome") != -1) return result = 'Chrome'; if (agt.indexOf("opera") != -1) return result = 'Opera'; if (agt.indexOf("staroffice") != -1) return result = 'Star Office'; if (agt.indexOf("webtv") != -1) return result = 'WebTV'; if (agt.indexOf("beonex") != -1) return result = 'Beonex'; if (agt.indexOf("chimera") != -1) return result = 'Chimera'; if (agt.indexOf("netpositive") != -1) return result = 'NetPositive'; if (agt.indexOf("phoenix") != -1) return result = 'Phoenix'; if (agt.indexOf("firefox") != -1) return result = 'Firefox'; if (agt.indexOf("safari") != -1) return result = 'Safari'; if (agt.indexOf("skipstone") != -1) return result = 'SkipStone'; if (agt.indexOf("netscape") != -1) return result = 'Netscape'; if (agt.indexOf("mozilla/5.0") != -1) return result = 'Mozilla'; return result; }, Android: function () { return navigator.userAgent.match(/Android/i); }, BlackBerry: function () { return navigator.userAgent.match(/BlackBerry/i); }, iOS: function () { return navigator.userAgent.match(/iPhone|iPad|iPod/i); }, isiPad: function () { return navigator.userAgent.match(/iPad/i); }, isiPhone: function () { return navigator.userAgent.match(/iPhone|iPod/i); }, Opera: function () { return navigator.userAgent.match(/Opera Mini/i); }, Windows: function () { return navigator.userAgent.match(/IEMobile/i); }, any: function () { return (this.Android() || this.BlackBerry() || this.iOS() || this.Opera() || this.Windows()); }, Chrome: function () { return navigator.userAgent.match(/Chrome/i); }, }; }); /*********************************************************************************************** * @Description : 페이지 공통 시작 * @param : totalcount - 전체 게시물 갯수 * @param : pageno - 현재 페이지 번호 * @param : blocksize - 화면 당 리스트 갯수 * @param : pageBoxSize - 하단 링크 갯수 * @param : 
scriptCallName - 링크 호출 javascript 함수명 * ************************************************************************************************/ function pageDisplay(totalcount, pageno, blocksize, pageBoxSize, scriptCallName) { var sbPaging = new StringBuilder(); totalcount = (totalcount == undefined) ? '0' : parseInt(totalcount, 10); pageno = (pageno == undefined) ? '0' : parseInt(pageno, 10); blocksize = (blocksize == undefined) ? '0' : parseInt(blocksize, 10); pageBoxSize = (pageBoxSize == undefined) ? '0' : parseInt(pageBoxSize, 10); var currentBoxSize = Math.ceil(pageno / pageBoxSize);// 현재 page block 번호 var startPageNo = Math.ceil((currentBoxSize - 1) * pageBoxSize) + 1;// 시작할 페이지 번호 var endPageNo = currentBoxSize * pageBoxSize;// block당 마지막 페이지 번호 var totalPageNo = Math.ceil((totalcount) / blocksize);// 총 페이지 번호 //console.log(totalPageNo); //var scriptCallName = ''; //scriptCallName = 'viewListNumberSearch'; // 시작 div //sbPaging.Append('\n<ul class="pagination">'); if (pageno > 1) { // 맨 처음 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{0}(1); return false;" aria-label="Previous"><span aria-hidden="true">&laquo;</span></a></li>', scriptCallName); // 이전 pageBoxSize 만큼 이동 //if (startPageNo > 1) { sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Previous"><span aria-hidden="true">&lt;</span></a></li>', pageno - 1, scriptCallName); } // 번호 for (i = startPageNo; i <= endPageNo; i++) { if (i <= totalPageNo) { var classname = ''; if (i == pageno) { classname = ' class="active"'; } else { classname = ''; } sbPaging.AppendFormat('\n<li{2}><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;">{0}</a>', i, scriptCallName, classname); } } if (totalPageNo > 1 && pageno < totalPageNo) { // 다음 pageBoxSize 만큼 이동 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&gt;</span></a></li>', pageno + 1, scriptCallName); // 마지막 sbPaging.AppendFormat('\n<li><a href="javascript:void(0);" onclick="javascript:{1}({0}); return false;" aria-label="Next"><span aria-hidden="true">&raquo;</span></a></li>', totalPageNo, scriptCallName); } $('.pagination').html(sbPaging.ToString()); }; //=================================== /** * @Content 다운로드 * @param * fileFullPath : 파일 패스 * fileName : 파일명 * @return **/ goFileDownload = function (fileFullPath, fileName) { try { var param = "?fileFullPath=" + fileFullPath + "&fileName=" + fileName; var url = CinemaServerDomain + "/LCHS/Contents/Common/download.aspx" + param; goUrl(url, '1'); } catch (e) { checkException(e, "goFileDownload", NOW_MENU_TYPE); //오류 발생 처리 } } //=================================== /** * @Content 이전 페이지로 이동 * @param * @return **/ locationBack = function () { history.back(-1); return; }
.push(hp1); hpArray.push(hp2); hpArray.push(hp3); return hpArray; } // Navigate to the main screen. //params> //return> goToMain = function () { $(location).attr(
conditional_block
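pageDisplay above derives the visible window of page links from the current page number, the number of links per block, the rows per page, and the total row count. Below is a minimal sketch of that arithmetic, assuming page_no >= 1 and positive sizes; the paging_window function is hypothetical, and where the original checks i <= totalPageNo inside its rendering loop, the sketch folds that clamp into the returned end page.

// Compute (first link, last link, total pages) for the current page block.
fn paging_window(total_count: u32, page_no: u32, block_size: u32, page_box_size: u32) -> (u32, u32, u32) {
    // ceil(a / b) for positive integers.
    let ceil_div = |a: u32, b: u32| (a + b - 1) / b;

    let current_box = ceil_div(page_no, page_box_size);     // which block of links we are in
    let start_page = (current_box - 1) * page_box_size + 1; // first link of that block
    let end_page = current_box * page_box_size;             // last link of that block
    let total_pages = ceil_div(total_count, block_size);    // total number of pages

    (start_page, end_page.min(total_pages), total_pages)
}

fn main() {
    // 95 rows, 10 rows per page, 5 links per block, currently on page 7:
    // pages run 1..=10 and page 7 falls in the link block 6..=10.
    assert_eq!(paging_window(95, 7, 10, 5), (6, 10, 10));
}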
renderer.rs
use std::f64; use pcg_rand::Pcg32; use rand::{Rng, SeedableRng}; use crate::hitpoint::Hitpoint; use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub}; use crate::ray::Ray; use crate::renderershape::RendererShape; use crate::rendereroutputpixel::RendererOutputPixel; use crate::rendererscene::RendererScene; use crate::renderertype::RendererType; use crate::NUMBER_OF_BINS; //static MODULO: [usize; 5] = [0, 1, 2, 0, 1]; pub struct Renderer { width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene, renderer_output_pixels: Vec<RendererOutputPixel> } impl Renderer { pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self { let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new(); Self { width, height, spp_per_iteration, maximum_spp, maximum_error, maximum_brdf_value, perform_post_process, scene, renderer_output_pixels, } } pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> { self.renderer_output_pixels.to_vec() } pub fn render(&mut self, y: u32, x: u32) { if x == self.width/2 { println!("Rendering row {} of {}.", y, self.height); } let number_of_light_spheres = self.scene.light_spheres.len(); let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32); let number_of_cameras = self.scene.cameras.len(); self.renderer_output_pixels.push(RendererOutputPixel::new(y, x)); let last_pos = self.renderer_output_pixels.len()-1; let mut pcg: Pcg32 = Pcg32::from_entropy(); let mut colors: Vec<[f64; 3]> = Vec::new(); let mut converged = false; // Loop over this pixel until we estimate the error to be small enough. let mut iterations = 0; while !converged { iterations += 1; for _ in 0..self.spp_per_iteration { // Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape. let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new(); self.renderer_output_pixels[last_pos].number_of_rays += 1.0; let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize; let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg); let mut color = self.scene.cameras[index].color; self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg); let mut total_color = [0.0, 0.0, 0.0]; if hitpoint_path_from_camera.is_empty() { colors.push(total_color); continue; } let direct_light_sampling = false; if direct_light_sampling { // Connect the camera path hitpoints with points on lights. for hitpoint_in_camera_path in &hitpoint_path_from_camera { let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg); total_color = add(total_color, color); } self.store(last_pos, total_color); colors.push(total_color); } else { for hitpoint in hitpoint_path_from_camera.iter().rev() { total_color = elementwise_mul(total_color, hitpoint.material.color); total_color = add(total_color, hitpoint.material.emission); } self.store(last_pos, total_color); colors.push(total_color); } } if colors.is_empty() { break; } // Estimate the error. If it's too large, create more rays for this pixel. 
let number_of_batches: usize = 20; let number_of_rays = colors.len(); let batch_size = number_of_rays/number_of_batches; let mut averages = self.averages(&colors, number_of_batches, batch_size); self.gamma_correct_averages(&mut averages); let use_standard_deviation = true; let error = if use_standard_deviation { self.standard_deviation(&averages) } else { self.maximum_distance(&averages) }; if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp { converged = true; } else if iterations%10 == 0 { println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error); } } } // @TODO: Fix so that it works even if colors.len()%batch_size != 0. fn
(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> { let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches]; for i in 0..colors.len() { averages[i/batch_size] = add(averages[i/batch_size], colors[i]); } for average in &mut averages { *average = mul(1.0/(batch_size as f64), *average); } averages } fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) { for average in averages { *average = intensity_to_color(*average); } } fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 { let length = averages.len(); let mut r = 0.0; let mut g = 0.0; let mut b = 0.0; let mut r_squared = 0.0; let mut g_squared = 0.0; let mut b_squared = 0.0; for average in averages { r += average[0]; g += average[1]; b += average[2]; r_squared += average[0]*average[0]; g_squared += average[1]*average[1]; b_squared += average[2]*average[2]; } let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0); // Due to rounding erros, the computed variance could in rare cases be slightly lower than 0.0. if variance < 0.0 { 0.0 } else { variance.sqrt() } } fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 { let mut smallest = [f64::MAX, f64::MAX, f64::MAX]; let mut largest = [f64::MIN, f64::MIN, f64::MIN]; for average in averages { for j in 0..3 { if average[j] < smallest[j] { smallest[j] = average[j]; } if average[j] > largest[j] { largest[j] = average[j]; } } } let max_r_distance = (largest[0]-smallest[0]).abs(); let max_g_distance = (largest[1]-smallest[1]).abs(); let max_b_distance = (largest[2]-smallest[2]).abs(); max_r_distance+max_g_distance+max_b_distance } fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) { let bullet_probability = 0.0; let survival_boost_factor = 1.0/(1.0-bullet_probability); loop { let r = pcg.gen::<f64>(); if r < bullet_probability { return; } else { *color = mul(survival_boost_factor, *color); } let hitpoint = self.closest_renderer_shape(&mut ray); if let Some(mut hitpoint) = hitpoint { let ingoing_direction = mul(-1.0, ray.direction); let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside { (1.0, hitpoint.material.refractive_index) } else { (hitpoint.material.refractive_index, 1.0) }; let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 { hitpoint.normal } else { mul(-1.0, hitpoint.normal) }; let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg); ray.position = hitpoint.position; ray.direction = direction; *color = elementwise_mul(*color, hitpoint.material.color); hitpoint.accumulated_color = *color; hitpoint_path.push(hitpoint); } else { return; } } } // @TODO: Implement support of triangular lightsources. fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] { let number_of_light_spheres = self.scene.light_spheres.len(); let number_of_cameras = self.scene.cameras.len(); let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32); let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize; let light_position = self.scene.light_spheres[index].get_position(&mut pcg); // @TODO: Should it not be .emission rather than .color? 
let light_color = self.scene.light_spheres[index].color; let direction = sub(hitpoint_in_camera_path.position, light_position); let distance = norm(direction); let direction_normalised = mul(1.0/distance, direction); if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 { return [0.0, 0.0, 0.0]; } let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised)); if let Some(closest_hitpoint) = closest_hitpoint { // @TODO Check if this is sane. if distance-closest_hitpoint.distance > 1.0e-9 { return [0.0, 0.0, 0.0]; } } let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction); let outgoing_direction = mul(-1.0, direction_normalised); let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 { hitpoint_in_camera_path.normal } else { mul(-1.0, hitpoint_in_camera_path.normal) }; let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside { (1.0, hitpoint_in_camera_path.material.refractive_index) } else { (hitpoint_in_camera_path.material.refractive_index, 1.0) }; // @TODO: Get rid of the upper limit of the brdf. let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value); mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color)) } fn store(&mut self, last_pos: usize, color: [f64; 3]) { self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color); self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color); self.renderer_output_pixels[last_pos].number_of_bin_elements += 1; if self.perform_post_process { let color = intensity_to_color(color); self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1; } } // Find the closest hitpoint. 
fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> { let mut min_distance = f64::MAX; let mut closest_renderer_shape_index: Option<usize> = None; let mut renderer_type = RendererType::Cylinder; for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() { if !cylinder.active { continue; } let distance = cylinder.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); } } for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() { if !sphere.active { continue; } let distance = sphere.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Sphere; } } for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() { if !triangle.active { continue; } let distance = triangle.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Triangle; } } if let Some(index) = closest_renderer_shape_index { let position = add(ray.position, mul(min_distance, ray.direction)); let (normal, material) = match renderer_type { RendererType::Cylinder => { (self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material()) } RendererType::Sphere => { (self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material()) } RendererType::Triangle => { (self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material()) } }; let hit_from_outside = dot(ray.direction, normal) < 0.0; Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0])) } else { None } } }
averages
identifier_name
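render() above decides whether a pixel has converged by grouping its samples into batches, averaging each batch, and comparing the sample standard deviation of those batch averages against maximum_error. Below is a minimal scalar sketch of that test which ignores the RGB channels and the gamma correction of the real code; batch_averages and sample_std_dev are illustrative names, and unlike the original's @TODO, chunks() simply leaves a shorter final batch.

// Average each batch of samples; a trailing partial batch is averaged as-is.
fn batch_averages(samples: &[f64], batch_size: usize) -> Vec<f64> {
    samples
        .chunks(batch_size)
        .map(|chunk| chunk.iter().sum::<f64>() / chunk.len() as f64)
        .collect()
}

// Sample standard deviation of the batch averages, clamped at zero to absorb
// the same rounding issue the renderer guards against.
fn sample_std_dev(values: &[f64]) -> f64 {
    let n = values.len() as f64;
    let sum: f64 = values.iter().sum();
    let sum_sq: f64 = values.iter().map(|v| v * v).sum();
    let variance = (sum_sq - sum * sum / n) / (n - 1.0);
    if variance < 0.0 { 0.0 } else { variance.sqrt() }
}

fn main() {
    let samples: Vec<f64> = (0..200).map(|i| (i % 7) as f64).collect();
    let averages = batch_averages(&samples, 10);
    let error = sample_std_dev(&averages);
    let converged = error < 0.5; // maximum_error plays this role in the renderer
    println!("error = {error:.4}, converged = {converged}");
}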
renderer.rs
use std::f64; use pcg_rand::Pcg32; use rand::{Rng, SeedableRng}; use crate::hitpoint::Hitpoint; use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub}; use crate::ray::Ray; use crate::renderershape::RendererShape; use crate::rendereroutputpixel::RendererOutputPixel; use crate::rendererscene::RendererScene; use crate::renderertype::RendererType; use crate::NUMBER_OF_BINS; //static MODULO: [usize; 5] = [0, 1, 2, 0, 1]; pub struct Renderer { width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene, renderer_output_pixels: Vec<RendererOutputPixel> } impl Renderer { pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self { let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new(); Self { width, height, spp_per_iteration, maximum_spp, maximum_error, maximum_brdf_value, perform_post_process, scene, renderer_output_pixels, } } pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> { self.renderer_output_pixels.to_vec() } pub fn render(&mut self, y: u32, x: u32) { if x == self.width/2 { println!("Rendering row {} of {}.", y, self.height); } let number_of_light_spheres = self.scene.light_spheres.len(); let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32); let number_of_cameras = self.scene.cameras.len(); self.renderer_output_pixels.push(RendererOutputPixel::new(y, x)); let last_pos = self.renderer_output_pixels.len()-1; let mut pcg: Pcg32 = Pcg32::from_entropy(); let mut colors: Vec<[f64; 3]> = Vec::new(); let mut converged = false; // Loop over this pixel until we estimate the error to be small enough. let mut iterations = 0; while !converged { iterations += 1; for _ in 0..self.spp_per_iteration { // Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape. let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new(); self.renderer_output_pixels[last_pos].number_of_rays += 1.0; let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize; let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg); let mut color = self.scene.cameras[index].color; self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg); let mut total_color = [0.0, 0.0, 0.0]; if hitpoint_path_from_camera.is_empty() { colors.push(total_color); continue; } let direct_light_sampling = false; if direct_light_sampling { // Connect the camera path hitpoints with points on lights. for hitpoint_in_camera_path in &hitpoint_path_from_camera { let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg); total_color = add(total_color, color); } self.store(last_pos, total_color); colors.push(total_color); } else { for hitpoint in hitpoint_path_from_camera.iter().rev() { total_color = elementwise_mul(total_color, hitpoint.material.color); total_color = add(total_color, hitpoint.material.emission); } self.store(last_pos, total_color); colors.push(total_color); } } if colors.is_empty() { break; } // Estimate the error. If it's too large, create more rays for this pixel. 
let number_of_batches: usize = 20; let number_of_rays = colors.len(); let batch_size = number_of_rays/number_of_batches; let mut averages = self.averages(&colors, number_of_batches, batch_size); self.gamma_correct_averages(&mut averages); let use_standard_deviation = true; let error = if use_standard_deviation { self.standard_deviation(&averages) } else { self.maximum_distance(&averages) }; if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp { converged = true; } else if iterations%10 == 0 { println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error); } } } // @TODO: Fix so that it works even if colors.len()%batch_size != 0. fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> { let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches]; for i in 0..colors.len() { averages[i/batch_size] = add(averages[i/batch_size], colors[i]); } for average in &mut averages { *average = mul(1.0/(batch_size as f64), *average); } averages } fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) { for average in averages { *average = intensity_to_color(*average); } } fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 { let length = averages.len(); let mut r = 0.0; let mut g = 0.0; let mut b = 0.0; let mut r_squared = 0.0; let mut g_squared = 0.0; let mut b_squared = 0.0; for average in averages { r += average[0]; g += average[1]; b += average[2]; r_squared += average[0]*average[0]; g_squared += average[1]*average[1]; b_squared += average[2]*average[2]; } let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0); // Due to rounding erros, the computed variance could in rare cases be slightly lower than 0.0. if variance < 0.0 { 0.0 } else { variance.sqrt() } } fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 { let mut smallest = [f64::MAX, f64::MAX, f64::MAX]; let mut largest = [f64::MIN, f64::MIN, f64::MIN]; for average in averages { for j in 0..3 { if average[j] < smallest[j] { smallest[j] = average[j]; } if average[j] > largest[j]
} } let max_r_distance = (largest[0]-smallest[0]).abs(); let max_g_distance = (largest[1]-smallest[1]).abs(); let max_b_distance = (largest[2]-smallest[2]).abs(); max_r_distance+max_g_distance+max_b_distance } fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) { let bullet_probability = 0.0; let survival_boost_factor = 1.0/(1.0-bullet_probability); loop { let r = pcg.gen::<f64>(); if r < bullet_probability { return; } else { *color = mul(survival_boost_factor, *color); } let hitpoint = self.closest_renderer_shape(&mut ray); if let Some(mut hitpoint) = hitpoint { let ingoing_direction = mul(-1.0, ray.direction); let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside { (1.0, hitpoint.material.refractive_index) } else { (hitpoint.material.refractive_index, 1.0) }; let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 { hitpoint.normal } else { mul(-1.0, hitpoint.normal) }; let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg); ray.position = hitpoint.position; ray.direction = direction; *color = elementwise_mul(*color, hitpoint.material.color); hitpoint.accumulated_color = *color; hitpoint_path.push(hitpoint); } else { return; } } } // @TODO: Implement support of triangular lightsources. fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] { let number_of_light_spheres = self.scene.light_spheres.len(); let number_of_cameras = self.scene.cameras.len(); let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32); let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize; let light_position = self.scene.light_spheres[index].get_position(&mut pcg); // @TODO: Should it not be .emission rather than .color? let light_color = self.scene.light_spheres[index].color; let direction = sub(hitpoint_in_camera_path.position, light_position); let distance = norm(direction); let direction_normalised = mul(1.0/distance, direction); if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 { return [0.0, 0.0, 0.0]; } let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised)); if let Some(closest_hitpoint) = closest_hitpoint { // @TODO Check if this is sane. if distance-closest_hitpoint.distance > 1.0e-9 { return [0.0, 0.0, 0.0]; } } let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction); let outgoing_direction = mul(-1.0, direction_normalised); let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 { hitpoint_in_camera_path.normal } else { mul(-1.0, hitpoint_in_camera_path.normal) }; let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside { (1.0, hitpoint_in_camera_path.material.refractive_index) } else { (hitpoint_in_camera_path.material.refractive_index, 1.0) }; // @TODO: Get rid of the upper limit of the brdf. 
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value); mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color)) } fn store(&mut self, last_pos: usize, color: [f64; 3]) { self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color); self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color); self.renderer_output_pixels[last_pos].number_of_bin_elements += 1; if self.perform_post_process { let color = intensity_to_color(color); self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1; } } // Find the closest hitpoint. fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> { let mut min_distance = f64::MAX; let mut closest_renderer_shape_index: Option<usize> = None; let mut renderer_type = RendererType::Cylinder; for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() { if !cylinder.active { continue; } let distance = cylinder.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); } } for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() { if !sphere.active { continue; } let distance = sphere.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Sphere; } } for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() { if !triangle.active { continue; } let distance = triangle.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Triangle; } } if let Some(index) = closest_renderer_shape_index { let position = add(ray.position, mul(min_distance, ray.direction)); let (normal, material) = match renderer_type { RendererType::Cylinder => { (self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material()) } RendererType::Sphere => { (self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material()) } RendererType::Triangle => { (self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material()) } }; let hit_from_outside = dot(ray.direction, normal) < 0.0; Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0])) } else { None } } }
{ largest[j] = average[j]; }
conditional_block
renderer.rs
use std::f64; use pcg_rand::Pcg32; use rand::{Rng, SeedableRng}; use crate::hitpoint::Hitpoint; use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub}; use crate::ray::Ray; use crate::renderershape::RendererShape; use crate::rendereroutputpixel::RendererOutputPixel; use crate::rendererscene::RendererScene; use crate::renderertype::RendererType; use crate::NUMBER_OF_BINS; //static MODULO: [usize; 5] = [0, 1, 2, 0, 1]; pub struct Renderer { width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene, renderer_output_pixels: Vec<RendererOutputPixel> } impl Renderer { pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self { let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new(); Self { width, height, spp_per_iteration, maximum_spp, maximum_error, maximum_brdf_value, perform_post_process, scene, renderer_output_pixels, } } pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> { self.renderer_output_pixels.to_vec() } pub fn render(&mut self, y: u32, x: u32) { if x == self.width/2 { println!("Rendering row {} of {}.", y, self.height); } let number_of_light_spheres = self.scene.light_spheres.len(); let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32); let number_of_cameras = self.scene.cameras.len(); self.renderer_output_pixels.push(RendererOutputPixel::new(y, x)); let last_pos = self.renderer_output_pixels.len()-1; let mut pcg: Pcg32 = Pcg32::from_entropy(); let mut colors: Vec<[f64; 3]> = Vec::new(); let mut converged = false; // Loop over this pixel until we estimate the error to be small enough. let mut iterations = 0; while !converged { iterations += 1; for _ in 0..self.spp_per_iteration { // Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape. let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new(); self.renderer_output_pixels[last_pos].number_of_rays += 1.0; let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize; let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg); let mut color = self.scene.cameras[index].color; self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg); let mut total_color = [0.0, 0.0, 0.0]; if hitpoint_path_from_camera.is_empty() { colors.push(total_color); continue; } let direct_light_sampling = false; if direct_light_sampling { // Connect the camera path hitpoints with points on lights. for hitpoint_in_camera_path in &hitpoint_path_from_camera { let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg); total_color = add(total_color, color); } self.store(last_pos, total_color); colors.push(total_color); } else { for hitpoint in hitpoint_path_from_camera.iter().rev() { total_color = elementwise_mul(total_color, hitpoint.material.color); total_color = add(total_color, hitpoint.material.emission); } self.store(last_pos, total_color); colors.push(total_color); } } if colors.is_empty() { break; } // Estimate the error. If it's too large, create more rays for this pixel. 
let number_of_batches: usize = 20; let number_of_rays = colors.len(); let batch_size = number_of_rays/number_of_batches; let mut averages = self.averages(&colors, number_of_batches, batch_size); self.gamma_correct_averages(&mut averages); let use_standard_deviation = true; let error = if use_standard_deviation { self.standard_deviation(&averages) } else { self.maximum_distance(&averages) }; if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp { converged = true; } else if iterations%10 == 0 { println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error); } } } // @TODO: Fix so that it works even if colors.len()%batch_size != 0. fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> { let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches]; for i in 0..colors.len() { averages[i/batch_size] = add(averages[i/batch_size], colors[i]); } for average in &mut averages { *average = mul(1.0/(batch_size as f64), *average); } averages } fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) { for average in averages { *average = intensity_to_color(*average); } } fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 { let length = averages.len(); let mut r = 0.0; let mut g = 0.0; let mut b = 0.0; let mut r_squared = 0.0; let mut g_squared = 0.0; let mut b_squared = 0.0; for average in averages { r += average[0]; g += average[1]; b += average[2]; r_squared += average[0]*average[0]; g_squared += average[1]*average[1]; b_squared += average[2]*average[2]; } let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0); // Due to rounding errors, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 { 0.0 } else { variance.sqrt() } } fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 { let mut smallest = [f64::MAX, f64::MAX, f64::MAX]; let mut largest = [f64::MIN, f64::MIN, f64::MIN]; for average in averages { for j in 0..3 { if average[j] < smallest[j] { smallest[j] = average[j]; } if average[j] > largest[j] { largest[j] = average[j]; } } } let max_r_distance = (largest[0]-smallest[0]).abs(); let max_g_distance = (largest[1]-smallest[1]).abs(); let max_b_distance = (largest[2]-smallest[2]).abs(); max_r_distance+max_g_distance+max_b_distance } fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) { let bullet_probability = 0.0; let survival_boost_factor = 1.0/(1.0-bullet_probability); loop { let r = pcg.gen::<f64>(); if r < bullet_probability { return; } else { *color = mul(survival_boost_factor, *color); } let hitpoint = self.closest_renderer_shape(&mut ray); if let Some(mut hitpoint) = hitpoint { let ingoing_direction = mul(-1.0, ray.direction); let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside { (1.0, hitpoint.material.refractive_index) } else { (hitpoint.material.refractive_index, 1.0) }; let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 { hitpoint.normal } else { mul(-1.0, hitpoint.normal) }; let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg); ray.position = hitpoint.position; ray.direction = direction; *color = elementwise_mul(*color, hitpoint.material.color); hitpoint.accumulated_color = *color; hitpoint_path.push(hitpoint); } else { return; } } } // @TODO: Implement support of triangular lightsources. fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] { let number_of_light_spheres = self.scene.light_spheres.len(); let number_of_cameras = self.scene.cameras.len(); let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32); let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize; let light_position = self.scene.light_spheres[index].get_position(&mut pcg); // @TODO: Should it not be .emission rather than .color? let light_color = self.scene.light_spheres[index].color; let direction = sub(hitpoint_in_camera_path.position, light_position); let distance = norm(direction); let direction_normalised = mul(1.0/distance, direction); if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 { return [0.0, 0.0, 0.0]; } let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised)); if let Some(closest_hitpoint) = closest_hitpoint { // @TODO Check if this is sane. if distance-closest_hitpoint.distance > 1.0e-9 { return [0.0, 0.0, 0.0]; } } let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction); let outgoing_direction = mul(-1.0, direction_normalised); let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 { hitpoint_in_camera_path.normal } else { mul(-1.0, hitpoint_in_camera_path.normal) }; let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside { (1.0, hitpoint_in_camera_path.material.refractive_index) } else { (hitpoint_in_camera_path.material.refractive_index, 1.0) }; // @TODO: Get rid of the upper limit of the brdf. 
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value); mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color)) } fn store(&mut self, last_pos: usize, color: [f64; 3]) { self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color); self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color); self.renderer_output_pixels[last_pos].number_of_bin_elements += 1; if self.perform_post_process { let color = intensity_to_color(color); self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1; } } // Find the closest hitpoint. fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> { let mut min_distance = f64::MAX; let mut closest_renderer_shape_index: Option<usize> = None; let mut renderer_type = RendererType::Cylinder; for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() { if !cylinder.active { continue;
} } for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() { if !sphere.active { continue; } let distance = sphere.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Sphere; } } for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() { if !triangle.active { continue; } let distance = triangle.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Triangle; } } if let Some(index) = closest_renderer_shape_index { let position = add(ray.position, mul(min_distance, ray.direction)); let (normal, material) = match renderer_type { RendererType::Cylinder => { (self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material()) } RendererType::Sphere => { (self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material()) } RendererType::Triangle => { (self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material()) } }; let hit_from_outside = dot(ray.direction, normal) < 0.0; Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0])) } else { None } } }
} let distance = cylinder.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i);
random_line_split
renderer.rs
use std::f64; use pcg_rand::Pcg32; use rand::{Rng, SeedableRng}; use crate::hitpoint::Hitpoint; use crate::math::{add, brdf, dot, elementwise_mul, intensity_to_color, min, mul, norm, random_from_brdf, sub}; use crate::ray::Ray; use crate::renderershape::RendererShape; use crate::rendereroutputpixel::RendererOutputPixel; use crate::rendererscene::RendererScene; use crate::renderertype::RendererType; use crate::NUMBER_OF_BINS; //static MODULO: [usize; 5] = [0, 1, 2, 0, 1]; pub struct Renderer { width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene, renderer_output_pixels: Vec<RendererOutputPixel> } impl Renderer { pub fn new(width: u32, height: u32, spp_per_iteration: u32, maximum_spp: u32, maximum_error: f64, maximum_brdf_value: f64, perform_post_process: bool, scene: RendererScene) -> Self { let renderer_output_pixels: Vec<RendererOutputPixel> = Vec::new(); Self { width, height, spp_per_iteration, maximum_spp, maximum_error, maximum_brdf_value, perform_post_process, scene, renderer_output_pixels, } } pub fn get_renderer_output_pixels(&self) -> Vec<RendererOutputPixel> { self.renderer_output_pixels.to_vec() } pub fn render(&mut self, y: u32, x: u32) { if x == self.width/2 { println!("Rendering row {} of {}.", y, self.height); } let number_of_light_spheres = self.scene.light_spheres.len(); let random_exclusive_max_lights: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_light_spheres as u32); let number_of_cameras = self.scene.cameras.len(); self.renderer_output_pixels.push(RendererOutputPixel::new(y, x)); let last_pos = self.renderer_output_pixels.len()-1; let mut pcg: Pcg32 = Pcg32::from_entropy(); let mut colors: Vec<[f64; 3]> = Vec::new(); let mut converged = false; // Loop over this pixel until we estimate the error to be small enough. let mut iterations = 0; while !converged { iterations += 1; for _ in 0..self.spp_per_iteration { // Create a hitpoint path starting from the camera. The first hitpoint will be the hitpoint of the ray coming from the retina, through the pinhole of the camera - that is, it is a point somewhere on a renderer shape. let mut hitpoint_path_from_camera: Vec<Hitpoint> = Vec::new(); self.renderer_output_pixels[last_pos].number_of_rays += 1.0; let index = (pcg.gen_range(0, random_exclusive_max_lights)%(number_of_cameras as u32)) as usize; let mut ray = self.scene.cameras[index].create_ray(x, y, self.width, self.height, &mut pcg); let mut color = self.scene.cameras[index].color; self.create_hitpoint_path(&mut ray, &mut color, &mut hitpoint_path_from_camera, &mut pcg); let mut total_color = [0.0, 0.0, 0.0]; if hitpoint_path_from_camera.is_empty() { colors.push(total_color); continue; } let direct_light_sampling = false; if direct_light_sampling { // Connect the camera path hitpoints with points on lights. for hitpoint_in_camera_path in &hitpoint_path_from_camera { let color = self.connect_and_compute_color(hitpoint_in_camera_path, &mut pcg); total_color = add(total_color, color); } self.store(last_pos, total_color); colors.push(total_color); } else { for hitpoint in hitpoint_path_from_camera.iter().rev() { total_color = elementwise_mul(total_color, hitpoint.material.color); total_color = add(total_color, hitpoint.material.emission); } self.store(last_pos, total_color); colors.push(total_color); } } if colors.is_empty() { break; } // Estimate the error. If it's too large, create more rays for this pixel. 
let number_of_batches: usize = 20; let number_of_rays = colors.len(); let batch_size = number_of_rays/number_of_batches; let mut averages = self.averages(&colors, number_of_batches, batch_size); self.gamma_correct_averages(&mut averages); let use_standard_deviation = true; let error = if use_standard_deviation { self.standard_deviation(&averages) } else { self.maximum_distance(&averages) }; if error < self.maximum_error || iterations*self.spp_per_iteration >= self.maximum_spp { converged = true; } else if iterations%10 == 0 { println!("Currently: {} iterations at ({}, {}) with an error of {}. Iterating until it is less than {}.", iterations, x, y, error, self.maximum_error); } } } // @TODO: Fix so that it works even if colors.len()%batch_size != 0. fn averages(&self, colors: &[[f64; 3]], number_of_batches: usize, batch_size: usize) -> Vec<[f64; 3]> { let mut averages: Vec<[f64; 3]> = vec![[0.0, 0.0, 0.0]; number_of_batches]; for i in 0..colors.len() { averages[i/batch_size] = add(averages[i/batch_size], colors[i]); } for average in &mut averages { *average = mul(1.0/(batch_size as f64), *average); } averages } fn gamma_correct_averages(&self, averages: &mut Vec<[f64; 3]>) { for average in averages { *average = intensity_to_color(*average); } } fn standard_deviation(&self, averages: &[[f64; 3]]) -> f64 { let length = averages.len(); let mut r = 0.0; let mut g = 0.0; let mut b = 0.0; let mut r_squared = 0.0; let mut g_squared = 0.0; let mut b_squared = 0.0; for average in averages { r += average[0]; g += average[1]; b += average[2]; r_squared += average[0]*average[0]; g_squared += average[1]*average[1]; b_squared += average[2]*average[2]; } let variance = (r_squared + g_squared + b_squared - (r*r+g*g+b*b)/(length as f64))/(length as f64-1.0); // Due to rounding errors, the computed variance could in rare cases be slightly lower than 0.0.
if variance < 0.0 { 0.0 } else { variance.sqrt() } } fn maximum_distance(&self, averages: &[[f64; 3]]) -> f64 { let mut smallest = [f64::MAX, f64::MAX, f64::MAX]; let mut largest = [f64::MIN, f64::MIN, f64::MIN]; for average in averages { for j in 0..3 { if average[j] < smallest[j] { smallest[j] = average[j]; } if average[j] > largest[j] { largest[j] = average[j]; } } } let max_r_distance = (largest[0]-smallest[0]).abs(); let max_g_distance = (largest[1]-smallest[1]).abs(); let max_b_distance = (largest[2]-smallest[2]).abs(); max_r_distance+max_g_distance+max_b_distance } fn create_hitpoint_path(&mut self, mut ray: &mut Ray, color: &mut [f64; 3], hitpoint_path: &mut Vec<Hitpoint>, pcg: &mut Pcg32) { let bullet_probability = 0.0; let survival_boost_factor = 1.0/(1.0-bullet_probability); loop { let r = pcg.gen::<f64>(); if r < bullet_probability { return; } else { *color = mul(survival_boost_factor, *color); } let hitpoint = self.closest_renderer_shape(&mut ray); if let Some(mut hitpoint) = hitpoint { let ingoing_direction = mul(-1.0, ray.direction); let (refractive_index_1, refractive_index_2) = if hitpoint.hit_from_outside { (1.0, hitpoint.material.refractive_index) } else { (hitpoint.material.refractive_index, 1.0) }; let normal = if dot(ingoing_direction, hitpoint.normal) > 0.0 { hitpoint.normal } else { mul(-1.0, hitpoint.normal) }; let (direction, _) = random_from_brdf(ingoing_direction, normal, hitpoint.material, refractive_index_1, refractive_index_2, pcg); ray.position = hitpoint.position; ray.direction = direction; *color = elementwise_mul(*color, hitpoint.material.color); hitpoint.accumulated_color = *color; hitpoint_path.push(hitpoint); } else { return; } } } // @TODO: Implement support of triangular lightsources. fn connect_and_compute_color(&self, hitpoint_in_camera_path: &Hitpoint, mut pcg: &mut Pcg32) -> [f64; 3] { let number_of_light_spheres = self.scene.light_spheres.len(); let number_of_cameras = self.scene.cameras.len(); let random_exclusive_max_cameras: u32 = <u32>::max_value() - <u32>::max_value()%(number_of_cameras as u32); let index = (pcg.gen_range(0, random_exclusive_max_cameras)%(number_of_light_spheres as u32)) as usize; let light_position = self.scene.light_spheres[index].get_position(&mut pcg); // @TODO: Should it not be .emission rather than .color? let light_color = self.scene.light_spheres[index].color; let direction = sub(hitpoint_in_camera_path.position, light_position); let distance = norm(direction); let direction_normalised = mul(1.0/distance, direction); if dot(hitpoint_in_camera_path.normal, direction_normalised) > 0.0 { return [0.0, 0.0, 0.0]; } let closest_hitpoint = self.closest_renderer_shape(&mut Ray::new(light_position, direction_normalised)); if let Some(closest_hitpoint) = closest_hitpoint { // @TODO Check if this is sane. if distance-closest_hitpoint.distance > 1.0e-9 { return [0.0, 0.0, 0.0]; } } let ingoing_direction = mul(-1.0, hitpoint_in_camera_path.incoming_direction); let outgoing_direction = mul(-1.0, direction_normalised); let normal = if dot(ingoing_direction, hitpoint_in_camera_path.normal) > 0.0 { hitpoint_in_camera_path.normal } else { mul(-1.0, hitpoint_in_camera_path.normal) }; let (refractive_index_1, refractive_index_2) = if hitpoint_in_camera_path.hit_from_outside { (1.0, hitpoint_in_camera_path.material.refractive_index) } else { (hitpoint_in_camera_path.material.refractive_index, 1.0) }; // @TODO: Get rid of the upper limit of the brdf. 
let brdf = min(brdf(ingoing_direction, outgoing_direction, normal, refractive_index_1, refractive_index_2, hitpoint_in_camera_path.material), self.maximum_brdf_value); mul(brdf/(distance*distance), elementwise_mul(hitpoint_in_camera_path.accumulated_color, light_color)) } fn store(&mut self, last_pos: usize, color: [f64; 3])
// Find the closest hitpoint. fn closest_renderer_shape(&self, ray: &mut Ray) -> Option<Hitpoint> { let mut min_distance = f64::MAX; let mut closest_renderer_shape_index: Option<usize> = None; let mut renderer_type = RendererType::Cylinder; for (i, cylinder) in self.scene.renderer_cylinders.iter().enumerate() { if !cylinder.active { continue; } let distance = cylinder.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); } } for (i, sphere) in self.scene.renderer_spheres.iter().enumerate() { if !sphere.active { continue; } let distance = sphere.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Sphere; } } for (i, triangle) in self.scene.renderer_triangles.iter().enumerate() { if !triangle.active { continue; } let distance = triangle.distance(&ray); if distance < min_distance { min_distance = distance; closest_renderer_shape_index = Some(i); renderer_type = RendererType::Triangle; } } if let Some(index) = closest_renderer_shape_index { let position = add(ray.position, mul(min_distance, ray.direction)); let (normal, material) = match renderer_type { RendererType::Cylinder => { (self.scene.renderer_cylinders[index].normal(position), self.scene.renderer_cylinders[index].material()) } RendererType::Sphere => { (self.scene.renderer_spheres[index].normal(position), self.scene.renderer_spheres[index].material()) } RendererType::Triangle => { (self.scene.renderer_triangles[index].normal(position), self.scene.renderer_triangles[index].material()) } }; let hit_from_outside = dot(ray.direction, normal) < 0.0; Some(Hitpoint::new(position, ray.direction, min_distance, normal, material, hit_from_outside, false, [0.0, 0.0, 0.0])) } else { None } } }
{ self.renderer_output_pixels[last_pos].pixel.color = add(self.renderer_output_pixels[last_pos].pixel.color, color); self.renderer_output_pixels[last_pos].color = add(self.renderer_output_pixels[last_pos].color, color); self.renderer_output_pixels[last_pos].number_of_bin_elements += 1; if self.perform_post_process { let color = intensity_to_color(color); self.renderer_output_pixels[last_pos].pixel.bins[(color[0]/255.0*(NUMBER_OF_BINS as f64)) as usize][0] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[1]/255.0*(NUMBER_OF_BINS as f64)) as usize][1] += 1; self.renderer_output_pixels[last_pos].pixel.bins[(color[2]/255.0*(NUMBER_OF_BINS as f64)) as usize][2] += 1; } }
identifier_body
de.rs
use serde::{ de::{DeserializeSeed, MapAccess, SeqAccess, Visitor}, forward_to_deserialize_any, }; use serde::Deserialize; use std::{cell::Cell, fmt}; #[derive(Debug, PartialEq)] pub enum DeserializeError { UnexpectedEOF, WrongCharacter(u8), End, InfoHashMissing, TooDeep, NoFile, EmptyFile, UnalignedPieces, Message(String), } type Result<T> = std::result::Result<T, DeserializeError>; impl serde::de::Error for DeserializeError { fn custom<T: fmt::Display>(msg: T) -> Self { DeserializeError::Message(msg.to_string()) } } impl fmt::Display for DeserializeError { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str(&format!("{:?}", self)) //formatter.write_str(std::error::Error::description(self)) } } impl std::error::Error for DeserializeError { fn
(&self) -> &str { "aa" //self.msg.as_str() } } pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T> where T: Deserialize<'de>, { let mut de: Deserializer = Deserializer::new(s); T::deserialize(&mut de) } pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)> where T: Deserialize<'de>, { let mut de: Deserializer = Deserializer::new(s); let res = T::deserialize(&mut de)?; let info_hash = if !de.start_info.is_null() && de.end_info > de.start_info { let len = de.end_info as usize - de.start_info as usize; let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) }; sha1::Sha1::from(&slice[..]).digest().bytes().to_vec() } else { //eprintln!("START={:?} END={:?}", de.start_info, de.end_info); return Err(DeserializeError::InfoHashMissing); }; Ok((res, info_hash)) } use crate::metadata::{InfoFile, MetaTorrent, Torrent}; pub fn read_meta(s: &[u8]) -> Result<Torrent> { let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?; if meta.info.pieces.len() % 20 != 0 { return Err(DeserializeError::UnalignedPieces); } match &meta.info.files { InfoFile::Multiple { files, .. } => { if files.is_empty() { return Err(DeserializeError::NoFile); } } InfoFile::Single { length, .. } => { if *length == 0 { return Err(DeserializeError::EmptyFile); } } } Ok(Torrent { meta, info_hash: info_hash.into(), }) } // 4b3ea6a5b1e62537dceb67230248ff092a723e4d // 4b3ea6a5b1e62537dceb67230248ff092a723e4d #[doc(hidden)] pub struct Deserializer<'de> { input: &'de [u8], start_info: *const u8, end_info: *const u8, info_depth: i64, // Fix v2_deep_recursion.torrent depth: Cell<u16>, } #[doc(hidbn)] impl<'de> Deserializer<'de> { fn new(input: &'de [u8]) -> Self { Deserializer { input, start_info: std::ptr::null(), end_info: std::ptr::null(), info_depth: 0, depth: Cell::new(0), } } fn peek(&self) -> Option<u8> { self.input.get(0).copied() } fn next(&mut self) -> Result<u8> { if let Some(c) = self.peek() { let _ = self.consume(); return Ok(c); } Err(DeserializeError::UnexpectedEOF) } fn consume(&mut self) -> Result<()> { self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?; Ok(()) } fn skip(&mut self, n: i64) -> Result<()> { self.input = &self .input .get(n as usize..) .ok_or(DeserializeError::UnexpectedEOF)?; Ok(()) } fn read_integer(&mut self, stop: u8) -> Result<i64> { let mut n: i64 = 0; loop { match self.next()? { c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64, c if c == stop => break, c => return Err(DeserializeError::WrongCharacter(c)), } } Ok(n) } fn read_number(&mut self) -> Result<i64> { self.consume()?; // 'i' let negative = match self.peek() { Some(b'-') => { self.consume()?; true } _ => false, }; let n = self.read_integer(b'e')?; Ok(if negative { -n } else { n }) } fn read_string(&mut self) -> Result<&'de [u8]> { let len = self.read_integer(b':')?; let s = self .input .get(..len as usize) .ok_or(DeserializeError::UnexpectedEOF)?; self.skip(len)?; if s == b"info" { //println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec())); self.start_info = self.input.as_ptr(); self.info_depth = 1; } //println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec())); Ok(s) } } #[doc(hidden)] impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> { type Error = DeserializeError; fn deserialize_any<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { // println!("NEXT: {:?}", self.peek()); match self.peek().ok_or(DeserializeError::UnexpectedEOF)? 
{ b'i' => { // println!("FOUND NUMBER", ); visitor.visit_i64(self.read_number()?) } b'l' => { self.consume()?; // println!("FOUND LIST {:?}", &self.input[..10]); visitor.visit_seq(BencAccess::new(self)) } b'd' => { let depth = self.depth.get(); if depth > 100 { return Err(DeserializeError::TooDeep); } self.depth.set(depth + 1); // println!("FOUND DICT {}", self.depth.get()); self.consume()?; visitor.visit_map(BencAccess::new(self)) } _n @ b'0'..=b'9' => { // println!("FOUND STRING", ); visitor.visit_borrowed_bytes(self.read_string()?) } c => Err(DeserializeError::WrongCharacter(c)), } } fn deserialize_option<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_some(self) } forward_to_deserialize_any! { bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string unit unit_struct seq tuple tuple_struct map struct identifier newtype_struct ignored_any enum bytes byte_buf } } struct BencAccess<'a, 'de> { de: &'a mut Deserializer<'de>, } impl<'a, 'de> BencAccess<'a, 'de> { fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> { if de.info_depth >= 1 { de.info_depth += 1; let _s = de.input; //println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); } BencAccess { de } } } impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> { type Error = DeserializeError; fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>> where K: DeserializeSeed<'de>, { if self.de.peek() == Some(b'e') { let _ = self.de.consume(); self.de.info_depth -= 1; if self.de.info_depth == 1 { //println!("FOUND END !"); self.de.end_info = self.de.input.as_ptr(); } let _s = self.de.input; //println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); return Ok(None); } seed.deserialize(&mut *self.de).map(Some) } fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value> where V: DeserializeSeed<'de>, { seed.deserialize(&mut *self.de) } } impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> { type Error = DeserializeError; fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>> where T: DeserializeSeed<'de>, { // println!("LAAA {:?}", &self.de.input[..5]); if self.de.peek() == Some(b'e') { let _ = self.de.consume(); if self.de.info_depth >= 1 { self.de.info_depth -= 1; } let _s = self.de.input; //println!("DEPTH[END_LIST]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); // println!("DEPTH={}", self.de.info_depth); return Ok(None); } seed.deserialize(&mut *self.de).map(Some) } } #[allow(non_snake_case)] #[cfg(test)] mod tests { use super::{from_bytes, DeserializeError, Result}; use serde::Deserialize; #[test] fn test_dict() { #[derive(Deserialize, PartialEq, Debug)] struct Dict<'b> { a: i64, b: &'b str, c: &'b str, X: &'b str, } let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap(); assert_eq!( bc, Dict { a: 12453, b: "aaa", c: "bbb", X: "0123456789", } ); } #[test] fn test_key_no_value() { #[derive(Deserialize, PartialEq, Debug)] struct Dict<'b> { a: i64, b: &'b str, } let res: Result<Dict> = from_bytes(b"d1:ai1e1:be"); println!("{:?}", res); assert_eq!(res, Err(DeserializeError::WrongCharacter(101))); } #[test] fn test_key_not_string() { #[derive(Deserialize, Debug)] struct Dict<'b> { a: i64, b: &'b str, } let res: Result<Dict> = from_bytes(b"di5e1:ae"); println!("{:?}", res); assert!(res.is_err()); } // TODO: Add more tests from // 
https://github.com/arvidn/libtorrent/blob/RC_1_2/test/test_bdecode.cpp }
description
identifier_name
de.rs
use serde::{ de::{DeserializeSeed, MapAccess, SeqAccess, Visitor}, forward_to_deserialize_any, }; use serde::Deserialize; use std::{cell::Cell, fmt}; #[derive(Debug, PartialEq)] pub enum DeserializeError { UnexpectedEOF, WrongCharacter(u8), End, InfoHashMissing, TooDeep, NoFile, EmptyFile, UnalignedPieces, Message(String), } type Result<T> = std::result::Result<T, DeserializeError>; impl serde::de::Error for DeserializeError { fn custom<T: fmt::Display>(msg: T) -> Self { DeserializeError::Message(msg.to_string()) } } impl fmt::Display for DeserializeError { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str(&format!("{:?}", self)) //formatter.write_str(std::error::Error::description(self)) } } impl std::error::Error for DeserializeError { fn description(&self) -> &str { "aa" //self.msg.as_str() } } pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T> where T: Deserialize<'de>, { let mut de: Deserializer = Deserializer::new(s); T::deserialize(&mut de) } pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)> where T: Deserialize<'de>, { let mut de: Deserializer = Deserializer::new(s); let res = T::deserialize(&mut de)?; let info_hash = if !de.start_info.is_null() && de.end_info > de.start_info { let len = de.end_info as usize - de.start_info as usize; let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) }; sha1::Sha1::from(&slice[..]).digest().bytes().to_vec() } else { //eprintln!("START={:?} END={:?}", de.start_info, de.end_info); return Err(DeserializeError::InfoHashMissing); }; Ok((res, info_hash)) } use crate::metadata::{InfoFile, MetaTorrent, Torrent}; pub fn read_meta(s: &[u8]) -> Result<Torrent> { let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?; if meta.info.pieces.len() % 20 != 0 { return Err(DeserializeError::UnalignedPieces); } match &meta.info.files { InfoFile::Multiple { files, .. } => { if files.is_empty() { return Err(DeserializeError::NoFile); } } InfoFile::Single { length, .. } => { if *length == 0 { return Err(DeserializeError::EmptyFile); } } } Ok(Torrent { meta, info_hash: info_hash.into(), }) } // 4b3ea6a5b1e62537dceb67230248ff092a723e4d // 4b3ea6a5b1e62537dceb67230248ff092a723e4d #[doc(hidden)] pub struct Deserializer<'de> { input: &'de [u8], start_info: *const u8, end_info: *const u8, info_depth: i64, // Fix v2_deep_recursion.torrent depth: Cell<u16>, } #[doc(hidbn)] impl<'de> Deserializer<'de> { fn new(input: &'de [u8]) -> Self { Deserializer { input, start_info: std::ptr::null(), end_info: std::ptr::null(), info_depth: 0, depth: Cell::new(0), } } fn peek(&self) -> Option<u8> { self.input.get(0).copied() } fn next(&mut self) -> Result<u8> { if let Some(c) = self.peek() { let _ = self.consume(); return Ok(c); } Err(DeserializeError::UnexpectedEOF) } fn consume(&mut self) -> Result<()> { self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?; Ok(()) } fn skip(&mut self, n: i64) -> Result<()> { self.input = &self .input .get(n as usize..) .ok_or(DeserializeError::UnexpectedEOF)?; Ok(()) } fn read_integer(&mut self, stop: u8) -> Result<i64> { let mut n: i64 = 0; loop { match self.next()? 
{ c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64, c if c == stop => break, c => return Err(DeserializeError::WrongCharacter(c)), } } Ok(n) } fn read_number(&mut self) -> Result<i64> { self.consume()?; // 'i' let negative = match self.peek() { Some(b'-') => { self.consume()?; true } _ => false, }; let n = self.read_integer(b'e')?; Ok(if negative { -n } else { n }) } fn read_string(&mut self) -> Result<&'de [u8]> { let len = self.read_integer(b':')?; let s = self .input .get(..len as usize) .ok_or(DeserializeError::UnexpectedEOF)?; self.skip(len)?; if s == b"info" { //println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec())); self.start_info = self.input.as_ptr(); self.info_depth = 1; } //println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec())); Ok(s) } } #[doc(hidden)] impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> { type Error = DeserializeError; fn deserialize_any<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { // println!("NEXT: {:?}", self.peek()); match self.peek().ok_or(DeserializeError::UnexpectedEOF)? { b'i' => { // println!("FOUND NUMBER", ); visitor.visit_i64(self.read_number()?) } b'l' => { self.consume()?; // println!("FOUND LIST {:?}", &self.input[..10]); visitor.visit_seq(BencAccess::new(self)) } b'd' => { let depth = self.depth.get(); if depth > 100 { return Err(DeserializeError::TooDeep); } self.depth.set(depth + 1); // println!("FOUND DICT {}", self.depth.get()); self.consume()?; visitor.visit_map(BencAccess::new(self)) } _n @ b'0'..=b'9' => { // println!("FOUND STRING", ); visitor.visit_borrowed_bytes(self.read_string()?) } c => Err(DeserializeError::WrongCharacter(c)), } } fn deserialize_option<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_some(self) } forward_to_deserialize_any! 
{ bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string unit unit_struct seq tuple tuple_struct map struct identifier newtype_struct ignored_any enum bytes byte_buf } } struct BencAccess<'a, 'de> { de: &'a mut Deserializer<'de>, } impl<'a, 'de> BencAccess<'a, 'de> { fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> { if de.info_depth >= 1 { de.info_depth += 1; let _s = de.input; //println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); } BencAccess { de } } } impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> { type Error = DeserializeError; fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>> where K: DeserializeSeed<'de>, { if self.de.peek() == Some(b'e') { let _ = self.de.consume(); self.de.info_depth -= 1; if self.de.info_depth == 1 { //println!("FOUND END !"); self.de.end_info = self.de.input.as_ptr(); } let _s = self.de.input; //println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); return Ok(None); } seed.deserialize(&mut *self.de).map(Some) } fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value> where V: DeserializeSeed<'de>, { seed.deserialize(&mut *self.de) } } impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> { type Error = DeserializeError; fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>> where T: DeserializeSeed<'de>, { // println!("LAAA {:?}", &self.de.input[..5]); if self.de.peek() == Some(b'e') { let _ = self.de.consume(); if self.de.info_depth >= 1 { self.de.info_depth -= 1; } let _s = self.de.input; //println!("DEPTH[END_LIST]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); // println!("DEPTH={}", self.de.info_depth); return Ok(None); } seed.deserialize(&mut *self.de).map(Some) } } #[allow(non_snake_case)] #[cfg(test)] mod tests { use super::{from_bytes, DeserializeError, Result}; use serde::Deserialize; #[test] fn test_dict()
#[test] fn test_key_no_value() { #[derive(Deserialize, PartialEq, Debug)] struct Dict<'b> { a: i64, b: &'b str, } let res: Result<Dict> = from_bytes(b"d1:ai1e1:be"); println!("{:?}", res); assert_eq!(res, Err(DeserializeError::WrongCharacter(101))); } #[test] fn test_key_not_string() { #[derive(Deserialize, Debug)] struct Dict<'b> { a: i64, b: &'b str, } let res: Result<Dict> = from_bytes(b"di5e1:ae"); println!("{:?}", res); assert!(res.is_err()); } // TODO: Add more tests from // https://github.com/arvidn/libtorrent/blob/RC_1_2/test/test_bdecode.cpp }
{ #[derive(Deserialize, PartialEq, Debug)] struct Dict<'b> { a: i64, b: &'b str, c: &'b str, X: &'b str, } let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap(); assert_eq!( bc, Dict { a: 12453, b: "aaa", c: "bbb", X: "0123456789", } ); }
identifier_body
de.rs
use serde::{ de::{DeserializeSeed, MapAccess, SeqAccess, Visitor}, forward_to_deserialize_any, }; use serde::Deserialize; use std::{cell::Cell, fmt}; #[derive(Debug, PartialEq)] pub enum DeserializeError { UnexpectedEOF, WrongCharacter(u8), End, InfoHashMissing, TooDeep, NoFile, EmptyFile, UnalignedPieces, Message(String), } type Result<T> = std::result::Result<T, DeserializeError>; impl serde::de::Error for DeserializeError { fn custom<T: fmt::Display>(msg: T) -> Self { DeserializeError::Message(msg.to_string()) } } impl fmt::Display for DeserializeError { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str(&format!("{:?}", self)) //formatter.write_str(std::error::Error::description(self)) } } impl std::error::Error for DeserializeError { fn description(&self) -> &str { "aa" //self.msg.as_str() } } pub fn from_bytes<'de, T>(s: &'de [u8]) -> Result<T> where T: Deserialize<'de>, { let mut de: Deserializer = Deserializer::new(s); T::deserialize(&mut de) } pub fn from_bytes_with_hash<'de, T>(s: &'de [u8]) -> Result<(T, Vec<u8>)> where T: Deserialize<'de>, { let mut de: Deserializer = Deserializer::new(s); let res = T::deserialize(&mut de)?; let info_hash = if !de.start_info.is_null() && de.end_info > de.start_info { let len = de.end_info as usize - de.start_info as usize; let slice = unsafe { std::slice::from_raw_parts(de.start_info, len) }; sha1::Sha1::from(&slice[..]).digest().bytes().to_vec() } else { //eprintln!("START={:?} END={:?}", de.start_info, de.end_info); return Err(DeserializeError::InfoHashMissing); }; Ok((res, info_hash)) } use crate::metadata::{InfoFile, MetaTorrent, Torrent}; pub fn read_meta(s: &[u8]) -> Result<Torrent> { let (meta, info_hash): (MetaTorrent, Vec<u8>) = from_bytes_with_hash(s)?; if meta.info.pieces.len() % 20 != 0 { return Err(DeserializeError::UnalignedPieces); } match &meta.info.files { InfoFile::Multiple { files, .. } => { if files.is_empty() { return Err(DeserializeError::NoFile); } } InfoFile::Single { length, .. } => { if *length == 0 { return Err(DeserializeError::EmptyFile); } } } Ok(Torrent { meta, info_hash: info_hash.into(), }) } // 4b3ea6a5b1e62537dceb67230248ff092a723e4d // 4b3ea6a5b1e62537dceb67230248ff092a723e4d #[doc(hidden)] pub struct Deserializer<'de> { input: &'de [u8], start_info: *const u8, end_info: *const u8, info_depth: i64, // Fix v2_deep_recursion.torrent depth: Cell<u16>, } #[doc(hidbn)] impl<'de> Deserializer<'de> { fn new(input: &'de [u8]) -> Self { Deserializer { input, start_info: std::ptr::null(), end_info: std::ptr::null(), info_depth: 0, depth: Cell::new(0), } } fn peek(&self) -> Option<u8> { self.input.get(0).copied() } fn next(&mut self) -> Result<u8> { if let Some(c) = self.peek() { let _ = self.consume(); return Ok(c); } Err(DeserializeError::UnexpectedEOF) } fn consume(&mut self) -> Result<()> { self.input = &self.input.get(1..).ok_or(DeserializeError::UnexpectedEOF)?; Ok(()) } fn skip(&mut self, n: i64) -> Result<()> { self.input = &self .input .get(n as usize..) .ok_or(DeserializeError::UnexpectedEOF)?; Ok(()) } fn read_integer(&mut self, stop: u8) -> Result<i64> { let mut n: i64 = 0; loop { match self.next()? 
{ c @ b'0'..=b'9' => n = (n * 10) + (c - b'0') as i64, c if c == stop => break, c => return Err(DeserializeError::WrongCharacter(c)), } } Ok(n) } fn read_number(&mut self) -> Result<i64> { self.consume()?; // 'i' let negative = match self.peek() { Some(b'-') => { self.consume()?; true } _ => false, }; let n = self.read_integer(b'e')?; Ok(if negative { -n } else { n }) } fn read_string(&mut self) -> Result<&'de [u8]> { let len = self.read_integer(b':')?; let s = self .input .get(..len as usize) .ok_or(DeserializeError::UnexpectedEOF)?; self.skip(len)?; if s == b"info" { //println!("INFO FOUND: {:?}", String::from_utf8(s.to_vec())); self.start_info = self.input.as_ptr(); self.info_depth = 1; } //println!("STRING={:?}", String::from_utf8((&s[0..std::cmp::min(100, s.len())]).to_vec())); Ok(s) } }
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { // println!("NEXT: {:?}", self.peek()); match self.peek().ok_or(DeserializeError::UnexpectedEOF)? { b'i' => { // println!("FOUND NUMBER", ); visitor.visit_i64(self.read_number()?) } b'l' => { self.consume()?; // println!("FOUND LIST {:?}", &self.input[..10]); visitor.visit_seq(BencAccess::new(self)) } b'd' => { let depth = self.depth.get(); if depth > 100 { return Err(DeserializeError::TooDeep); } self.depth.set(depth + 1); // println!("FOUND DICT {}", self.depth.get()); self.consume()?; visitor.visit_map(BencAccess::new(self)) } _n @ b'0'..=b'9' => { // println!("FOUND STRING", ); visitor.visit_borrowed_bytes(self.read_string()?) } c => Err(DeserializeError::WrongCharacter(c)), } } fn deserialize_option<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_some(self) } forward_to_deserialize_any! { bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string unit unit_struct seq tuple tuple_struct map struct identifier newtype_struct ignored_any enum bytes byte_buf } } struct BencAccess<'a, 'de> { de: &'a mut Deserializer<'de>, } impl<'a, 'de> BencAccess<'a, 'de> { fn new(de: &'a mut Deserializer<'de>) -> BencAccess<'a, 'de> { if de.info_depth >= 1 { de.info_depth += 1; let _s = de.input; //println!("DEPTH[NEW]={:?} {:?}", de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); } BencAccess { de } } } impl<'a, 'de> MapAccess<'de> for BencAccess<'a, 'de> { type Error = DeserializeError; fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>> where K: DeserializeSeed<'de>, { if self.de.peek() == Some(b'e') { let _ = self.de.consume(); self.de.info_depth -= 1; if self.de.info_depth == 1 { //println!("FOUND END !"); self.de.end_info = self.de.input.as_ptr(); } let _s = self.de.input; //println!("DEPTH[END_DICT]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); return Ok(None); } seed.deserialize(&mut *self.de).map(Some) } fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value> where V: DeserializeSeed<'de>, { seed.deserialize(&mut *self.de) } } impl<'a, 'de> SeqAccess<'de> for BencAccess<'a, 'de> { type Error = DeserializeError; fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>> where T: DeserializeSeed<'de>, { // println!("LAAA {:?}", &self.de.input[..5]); if self.de.peek() == Some(b'e') { let _ = self.de.consume(); if self.de.info_depth >= 1 { self.de.info_depth -= 1; } let _s = self.de.input; //println!("DEPTH[END_LIST]={:?} {:?}", self.de.info_depth, String::from_utf8((&s[0..std::cmp::min(50, s.len())]).to_vec())); // println!("DEPTH={}", self.de.info_depth); return Ok(None); } seed.deserialize(&mut *self.de).map(Some) } } #[allow(non_snake_case)] #[cfg(test)] mod tests { use super::{from_bytes, DeserializeError, Result}; use serde::Deserialize; #[test] fn test_dict() { #[derive(Deserialize, PartialEq, Debug)] struct Dict<'b> { a: i64, b: &'b str, c: &'b str, X: &'b str, } let bc: Dict = from_bytes(b"d1:ai12453e1:b3:aaa1:c3:bbb1:X10:0123456789e").unwrap(); assert_eq!( bc, Dict { a: 12453, b: "aaa", c: "bbb", X: "0123456789", } ); } #[test] fn test_key_no_value() { #[derive(Deserialize, PartialEq, Debug)] struct Dict<'b> { a: i64, b: &'b str, } let res: Result<Dict> = from_bytes(b"d1:ai1e1:be"); println!("{:?}", res); assert_eq!(res, Err(DeserializeError::WrongCharacter(101))); } #[test] fn test_key_not_string() { #[derive(Deserialize, Debug)] struct Dict<'b> { a: i64, b: 
&'b str, } let res: Result<Dict> = from_bytes(b"di5e1:ae"); println!("{:?}", res); assert!(res.is_err()); } // TODO: Add more tests from // https://github.com/arvidn/libtorrent/blob/RC_1_2/test/test_bdecode.cpp }
#[doc(hidden)] impl<'a, 'de> serde::de::Deserializer<'de> for &'a mut Deserializer<'de> { type Error = DeserializeError;
random_line_split
main.rs
// Copyright (c) 2016 The vulkano developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, // at your option. All files in the project carrying such // notice may not be copied, modified, or distributed except // according to those terms. use cgmath::{Matrix3, Matrix4, Point3, Rad, Vector3}; use examples::{Normal, Position, INDICES, NORMALS, POSITIONS}; use std::{sync::Arc, time::Instant}; use vulkano::{ buffer::{ allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo}, Buffer, BufferCreateInfo, BufferUsage, }, command_buffer::{ allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage, RenderPassBeginInfo, }, descriptor_set::{ allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet, }, device::{ physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceOwned, QueueCreateInfo, QueueFlags, }, format::Format, image::{view::ImageView, Image, ImageCreateInfo, ImageType, ImageUsage}, instance::{Instance, InstanceCreateFlags, InstanceCreateInfo}, memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::ColorBlendState, depth_stencil::DepthStencilState, input_assembly::InputAssemblyState, multisample::MultisampleState, rasterization::RasterizationState, vertex_input::{Vertex, VertexDefinition}, viewport::{Viewport, ViewportState}, GraphicsPipelineCreateInfo, }, layout::PipelineDescriptorSetLayoutCreateInfo, GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout, PipelineShaderStageCreateInfo, }, render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass}, shader::EntryPoint, swapchain::{ acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo, }, sync::{self, GpuFuture}, Validated, VulkanError, VulkanLibrary, }; use winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }; fn main()
/// This function is called once during initialization, then again whenever the window is resized. fn window_size_dependent_setup( memory_allocator: &StandardMemoryAllocator, vs: EntryPoint, fs: EntryPoint, images: &[Arc<Image>], render_pass: Arc<RenderPass>, ) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) { let extent = images[0].extent(); let depth_buffer = ImageView::new_default( Image::new( memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::D16_UNORM, extent: images[0].extent(), usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT | ImageUsage::TRANSIENT_ATTACHMENT, ..Default::default() }, AllocationCreateInfo::default(), ) .unwrap(), ) .unwrap(); let framebuffers = images .iter() .map(|image| { let view = ImageView::new_default(image.clone()).unwrap(); Framebuffer::new( render_pass.clone(), FramebufferCreateInfo { attachments: vec![view, depth_buffer.clone()], ..Default::default() }, ) .unwrap() }) .collect::<Vec<_>>(); // In the triangle example we use a dynamic viewport, as its a simple example. However in the // teapot example, we recreate the pipelines with a hardcoded viewport instead. This allows the // driver to optimize things, at the cost of slower window resizes. // https://computergraphics.stackexchange.com/questions/5742/vulkan-best-way-of-updating-pipeline-viewport let pipeline = { let device = memory_allocator.device(); let vertex_input_state = [Position::per_vertex(), Normal::per_vertex()] .definition(&vs.info().input_interface) .unwrap(); let stages = [ PipelineShaderStageCreateInfo::new(vs), PipelineShaderStageCreateInfo::new(fs), ]; let layout = PipelineLayout::new( device.clone(), PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages) .into_pipeline_layout_create_info(device.clone()) .unwrap(), ) .unwrap(); let subpass = Subpass::from(render_pass, 0).unwrap(); GraphicsPipeline::new( device.clone(), None, GraphicsPipelineCreateInfo { stages: stages.into_iter().collect(), vertex_input_state: Some(vertex_input_state), input_assembly_state: Some(InputAssemblyState::default()), viewport_state: Some(ViewportState::viewport_fixed_scissor_irrelevant([ Viewport { offset: [0.0, 0.0], extent: [extent[0] as f32, extent[1] as f32], depth_range: 0.0..=1.0, }, ])), rasterization_state: Some(RasterizationState::default()), depth_stencil_state: Some(DepthStencilState::simple_depth_test()), multisample_state: Some(MultisampleState::default()), color_blend_state: Some(ColorBlendState::new(subpass.num_color_attachments())), subpass: Some(subpass.into()), ..GraphicsPipelineCreateInfo::layout(layout) }, ) .unwrap() }; (pipeline, framebuffers) } mod vs { vulkano_shaders::shader! { ty: "vertex", path: "src/bin/teapot/vert.glsl", } } mod fs { vulkano_shaders::shader! { ty: "fragment", path: "src/bin/teapot/frag.glsl", } }
{ // The start of this example is exactly the same as `triangle`. You should read the `triangle` // example if you haven't done so yet. let event_loop = EventLoop::new(); let library = VulkanLibrary::new().unwrap(); let required_extensions = Surface::required_extensions(&event_loop); let instance = Instance::new( library, InstanceCreateInfo { flags: InstanceCreateFlags::ENUMERATE_PORTABILITY, enabled_extensions: required_extensions, ..Default::default() }, ) .unwrap(); let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap()); let surface = Surface::from_window(instance.clone(), window.clone()).unwrap(); let device_extensions = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::empty() }; let (physical_device, queue_family_index) = instance .enumerate_physical_devices() .unwrap() .filter(|p| p.supported_extensions().contains(&device_extensions)) .filter_map(|p| { p.queue_family_properties() .iter() .enumerate() .position(|(i, q)| { q.queue_flags.intersects(QueueFlags::GRAPHICS) && p.surface_support(i as u32, &surface).unwrap_or(false) }) .map(|i| (p, i as u32)) }) .min_by_key(|(p, _)| match p.properties().device_type { PhysicalDeviceType::DiscreteGpu => 0, PhysicalDeviceType::IntegratedGpu => 1, PhysicalDeviceType::VirtualGpu => 2, PhysicalDeviceType::Cpu => 3, PhysicalDeviceType::Other => 4, _ => 5, }) .unwrap(); println!( "Using device: {} (type: {:?})", physical_device.properties().device_name, physical_device.properties().device_type, ); let (device, mut queues) = Device::new( physical_device, DeviceCreateInfo { enabled_extensions: device_extensions, queue_create_infos: vec![QueueCreateInfo { queue_family_index, ..Default::default() }], ..Default::default() }, ) .unwrap(); let queue = queues.next().unwrap(); let (mut swapchain, images) = { let surface_capabilities = device .physical_device() .surface_capabilities(&surface, Default::default()) .unwrap(); let image_format = device .physical_device() .surface_formats(&surface, Default::default()) .unwrap()[0] .0; Swapchain::new( device.clone(), surface, SwapchainCreateInfo { min_image_count: surface_capabilities.min_image_count.max(2), image_format, image_extent: window.inner_size().into(), image_usage: ImageUsage::COLOR_ATTACHMENT, composite_alpha: surface_capabilities .supported_composite_alpha .into_iter() .next() .unwrap(), ..Default::default() }, ) .unwrap() }; let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let vertex_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, POSITIONS, ) .unwrap(); let normals_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, NORMALS, ) .unwrap(); let index_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::INDEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, INDICES, ) .unwrap(); let uniform_buffer = SubbufferAllocator::new( memory_allocator.clone(), SubbufferAllocatorCreateInfo { buffer_usage: BufferUsage::UNIFORM_BUFFER, memory_type_filter: 
MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, ); let render_pass = vulkano::single_pass_renderpass!( device.clone(), attachments: { color: { format: swapchain.image_format(), samples: 1, load_op: Clear, store_op: Store, }, depth_stencil: { format: Format::D16_UNORM, samples: 1, load_op: Clear, store_op: DontCare, }, }, pass: { color: [color], depth_stencil: {depth_stencil}, }, ) .unwrap(); let vs = vs::load(device.clone()) .unwrap() .entry_point("main") .unwrap(); let fs = fs::load(device.clone()) .unwrap() .entry_point("main") .unwrap(); let (mut pipeline, mut framebuffers) = window_size_dependent_setup( &memory_allocator, vs.clone(), fs.clone(), &images, render_pass.clone(), ); let mut recreate_swapchain = false; let mut previous_frame_end = Some(sync::now(device.clone()).boxed()); let rotation_start = Instant::now(); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); event_loop.run(move |event, _, control_flow| { match event { Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => { *control_flow = ControlFlow::Exit; } Event::WindowEvent { event: WindowEvent::Resized(_), .. } => { recreate_swapchain = true; } Event::RedrawEventsCleared => { let image_extent: [u32; 2] = window.inner_size().into(); if image_extent.contains(&0) { return; } previous_frame_end.as_mut().unwrap().cleanup_finished(); if recreate_swapchain { let (new_swapchain, new_images) = swapchain .recreate(SwapchainCreateInfo { image_extent, ..swapchain.create_info() }) .expect("failed to recreate swapchain"); swapchain = new_swapchain; let (new_pipeline, new_framebuffers) = window_size_dependent_setup( &memory_allocator, vs.clone(), fs.clone(), &new_images, render_pass.clone(), ); pipeline = new_pipeline; framebuffers = new_framebuffers; recreate_swapchain = false; } let uniform_buffer_subbuffer = { let elapsed = rotation_start.elapsed(); let rotation = elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0; let rotation = Matrix3::from_angle_y(Rad(rotation as f32)); // note: this teapot was meant for OpenGL where the origin is at the lower left // instead the origin is at the upper left in Vulkan, so we reverse the Y axis let aspect_ratio = swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32; let proj = cgmath::perspective( Rad(std::f32::consts::FRAC_PI_2), aspect_ratio, 0.01, 100.0, ); let view = Matrix4::look_at_rh( Point3::new(0.3, 0.3, 1.0), Point3::new(0.0, 0.0, 0.0), Vector3::new(0.0, -1.0, 0.0), ); let scale = Matrix4::from_scale(0.01); let uniform_data = vs::Data { world: Matrix4::from(rotation).into(), view: (view * scale).into(), proj: proj.into(), }; let subbuffer = uniform_buffer.allocate_sized().unwrap(); *subbuffer.write().unwrap() = uniform_data; subbuffer }; let layout = pipeline.layout().set_layouts().get(0).unwrap(); let set = PersistentDescriptorSet::new( &descriptor_set_allocator, layout.clone(), [WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)], [], ) .unwrap(); let (image_index, suboptimal, acquire_future) = match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) { Ok(r) => r, Err(VulkanError::OutOfDate) => { recreate_swapchain = true; return; } Err(e) => panic!("failed to acquire next image: {e}"), }; if suboptimal { recreate_swapchain = true; } let mut builder = AutoCommandBufferBuilder::primary( &command_buffer_allocator, 
queue.queue_family_index(), CommandBufferUsage::OneTimeSubmit, ) .unwrap(); builder .begin_render_pass( RenderPassBeginInfo { clear_values: vec![ Some([0.0, 0.0, 1.0, 1.0].into()), Some(1f32.into()), ], ..RenderPassBeginInfo::framebuffer( framebuffers[image_index as usize].clone(), ) }, Default::default(), ) .unwrap() .bind_pipeline_graphics(pipeline.clone()) .unwrap() .bind_descriptor_sets( PipelineBindPoint::Graphics, pipeline.layout().clone(), 0, set, ) .unwrap() .bind_vertex_buffers(0, (vertex_buffer.clone(), normals_buffer.clone())) .unwrap() .bind_index_buffer(index_buffer.clone()) .unwrap() .draw_indexed(index_buffer.len() as u32, 1, 0, 0, 0) .unwrap() .end_render_pass(Default::default()) .unwrap(); let command_buffer = builder.build().unwrap(); let future = previous_frame_end .take() .unwrap() .join(acquire_future) .then_execute(queue.clone(), command_buffer) .unwrap() .then_swapchain_present( queue.clone(), SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index), ) .then_signal_fence_and_flush(); match future.map_err(Validated::unwrap) { Ok(future) => { previous_frame_end = Some(future.boxed()); } Err(VulkanError::OutOfDate) => { recreate_swapchain = true; previous_frame_end = Some(sync::now(device.clone()).boxed()); } Err(e) => { println!("failed to flush future: {e}"); previous_frame_end = Some(sync::now(device.clone()).boxed()); } } } _ => (), } }); }
identifier_body
main.rs
// Copyright (c) 2016 The vulkano developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, // at your option. All files in the project carrying such // notice may not be copied, modified, or distributed except // according to those terms. use cgmath::{Matrix3, Matrix4, Point3, Rad, Vector3}; use examples::{Normal, Position, INDICES, NORMALS, POSITIONS}; use std::{sync::Arc, time::Instant}; use vulkano::{ buffer::{ allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo}, Buffer, BufferCreateInfo, BufferUsage, }, command_buffer::{ allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage, RenderPassBeginInfo, }, descriptor_set::{ allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet, }, device::{ physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceOwned, QueueCreateInfo, QueueFlags, }, format::Format, image::{view::ImageView, Image, ImageCreateInfo, ImageType, ImageUsage}, instance::{Instance, InstanceCreateFlags, InstanceCreateInfo}, memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::ColorBlendState, depth_stencil::DepthStencilState, input_assembly::InputAssemblyState, multisample::MultisampleState, rasterization::RasterizationState, vertex_input::{Vertex, VertexDefinition}, viewport::{Viewport, ViewportState}, GraphicsPipelineCreateInfo, }, layout::PipelineDescriptorSetLayoutCreateInfo, GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout, PipelineShaderStageCreateInfo, }, render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass}, shader::EntryPoint, swapchain::{ acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo, }, sync::{self, GpuFuture}, Validated, VulkanError, VulkanLibrary, }; use winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }; fn
() { // The start of this example is exactly the same as `triangle`. You should read the `triangle` // example if you haven't done so yet. let event_loop = EventLoop::new(); let library = VulkanLibrary::new().unwrap(); let required_extensions = Surface::required_extensions(&event_loop); let instance = Instance::new( library, InstanceCreateInfo { flags: InstanceCreateFlags::ENUMERATE_PORTABILITY, enabled_extensions: required_extensions, ..Default::default() }, ) .unwrap(); let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap()); let surface = Surface::from_window(instance.clone(), window.clone()).unwrap(); let device_extensions = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::empty() }; let (physical_device, queue_family_index) = instance .enumerate_physical_devices() .unwrap() .filter(|p| p.supported_extensions().contains(&device_extensions)) .filter_map(|p| { p.queue_family_properties() .iter() .enumerate() .position(|(i, q)| { q.queue_flags.intersects(QueueFlags::GRAPHICS) && p.surface_support(i as u32, &surface).unwrap_or(false) }) .map(|i| (p, i as u32)) }) .min_by_key(|(p, _)| match p.properties().device_type { PhysicalDeviceType::DiscreteGpu => 0, PhysicalDeviceType::IntegratedGpu => 1, PhysicalDeviceType::VirtualGpu => 2, PhysicalDeviceType::Cpu => 3, PhysicalDeviceType::Other => 4, _ => 5, }) .unwrap(); println!( "Using device: {} (type: {:?})", physical_device.properties().device_name, physical_device.properties().device_type, ); let (device, mut queues) = Device::new( physical_device, DeviceCreateInfo { enabled_extensions: device_extensions, queue_create_infos: vec![QueueCreateInfo { queue_family_index, ..Default::default() }], ..Default::default() }, ) .unwrap(); let queue = queues.next().unwrap(); let (mut swapchain, images) = { let surface_capabilities = device .physical_device() .surface_capabilities(&surface, Default::default()) .unwrap(); let image_format = device .physical_device() .surface_formats(&surface, Default::default()) .unwrap()[0] .0; Swapchain::new( device.clone(), surface, SwapchainCreateInfo { min_image_count: surface_capabilities.min_image_count.max(2), image_format, image_extent: window.inner_size().into(), image_usage: ImageUsage::COLOR_ATTACHMENT, composite_alpha: surface_capabilities .supported_composite_alpha .into_iter() .next() .unwrap(), ..Default::default() }, ) .unwrap() }; let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let vertex_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, POSITIONS, ) .unwrap(); let normals_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, NORMALS, ) .unwrap(); let index_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::INDEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, INDICES, ) .unwrap(); let uniform_buffer = SubbufferAllocator::new( memory_allocator.clone(), SubbufferAllocatorCreateInfo { buffer_usage: BufferUsage::UNIFORM_BUFFER, memory_type_filter: 
MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, ); let render_pass = vulkano::single_pass_renderpass!( device.clone(), attachments: { color: { format: swapchain.image_format(), samples: 1, load_op: Clear, store_op: Store, }, depth_stencil: { format: Format::D16_UNORM, samples: 1, load_op: Clear, store_op: DontCare, }, }, pass: { color: [color], depth_stencil: {depth_stencil}, }, ) .unwrap(); let vs = vs::load(device.clone()) .unwrap() .entry_point("main") .unwrap(); let fs = fs::load(device.clone()) .unwrap() .entry_point("main") .unwrap(); let (mut pipeline, mut framebuffers) = window_size_dependent_setup( &memory_allocator, vs.clone(), fs.clone(), &images, render_pass.clone(), ); let mut recreate_swapchain = false; let mut previous_frame_end = Some(sync::now(device.clone()).boxed()); let rotation_start = Instant::now(); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); event_loop.run(move |event, _, control_flow| { match event { Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => { *control_flow = ControlFlow::Exit; } Event::WindowEvent { event: WindowEvent::Resized(_), .. } => { recreate_swapchain = true; } Event::RedrawEventsCleared => { let image_extent: [u32; 2] = window.inner_size().into(); if image_extent.contains(&0) { return; } previous_frame_end.as_mut().unwrap().cleanup_finished(); if recreate_swapchain { let (new_swapchain, new_images) = swapchain .recreate(SwapchainCreateInfo { image_extent, ..swapchain.create_info() }) .expect("failed to recreate swapchain"); swapchain = new_swapchain; let (new_pipeline, new_framebuffers) = window_size_dependent_setup( &memory_allocator, vs.clone(), fs.clone(), &new_images, render_pass.clone(), ); pipeline = new_pipeline; framebuffers = new_framebuffers; recreate_swapchain = false; } let uniform_buffer_subbuffer = { let elapsed = rotation_start.elapsed(); let rotation = elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0; let rotation = Matrix3::from_angle_y(Rad(rotation as f32)); // note: this teapot was meant for OpenGL where the origin is at the lower left // instead the origin is at the upper left in Vulkan, so we reverse the Y axis let aspect_ratio = swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32; let proj = cgmath::perspective( Rad(std::f32::consts::FRAC_PI_2), aspect_ratio, 0.01, 100.0, ); let view = Matrix4::look_at_rh( Point3::new(0.3, 0.3, 1.0), Point3::new(0.0, 0.0, 0.0), Vector3::new(0.0, -1.0, 0.0), ); let scale = Matrix4::from_scale(0.01); let uniform_data = vs::Data { world: Matrix4::from(rotation).into(), view: (view * scale).into(), proj: proj.into(), }; let subbuffer = uniform_buffer.allocate_sized().unwrap(); *subbuffer.write().unwrap() = uniform_data; subbuffer }; let layout = pipeline.layout().set_layouts().get(0).unwrap(); let set = PersistentDescriptorSet::new( &descriptor_set_allocator, layout.clone(), [WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)], [], ) .unwrap(); let (image_index, suboptimal, acquire_future) = match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) { Ok(r) => r, Err(VulkanError::OutOfDate) => { recreate_swapchain = true; return; } Err(e) => panic!("failed to acquire next image: {e}"), }; if suboptimal { recreate_swapchain = true; } let mut builder = AutoCommandBufferBuilder::primary( &command_buffer_allocator, 
queue.queue_family_index(), CommandBufferUsage::OneTimeSubmit, ) .unwrap(); builder .begin_render_pass( RenderPassBeginInfo { clear_values: vec![ Some([0.0, 0.0, 1.0, 1.0].into()), Some(1f32.into()), ], ..RenderPassBeginInfo::framebuffer( framebuffers[image_index as usize].clone(), ) }, Default::default(), ) .unwrap() .bind_pipeline_graphics(pipeline.clone()) .unwrap() .bind_descriptor_sets( PipelineBindPoint::Graphics, pipeline.layout().clone(), 0, set, ) .unwrap() .bind_vertex_buffers(0, (vertex_buffer.clone(), normals_buffer.clone())) .unwrap() .bind_index_buffer(index_buffer.clone()) .unwrap() .draw_indexed(index_buffer.len() as u32, 1, 0, 0, 0) .unwrap() .end_render_pass(Default::default()) .unwrap(); let command_buffer = builder.build().unwrap(); let future = previous_frame_end .take() .unwrap() .join(acquire_future) .then_execute(queue.clone(), command_buffer) .unwrap() .then_swapchain_present( queue.clone(), SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index), ) .then_signal_fence_and_flush(); match future.map_err(Validated::unwrap) { Ok(future) => { previous_frame_end = Some(future.boxed()); } Err(VulkanError::OutOfDate) => { recreate_swapchain = true; previous_frame_end = Some(sync::now(device.clone()).boxed()); } Err(e) => { println!("failed to flush future: {e}"); previous_frame_end = Some(sync::now(device.clone()).boxed()); } } } _ => (), } }); } /// This function is called once during initialization, then again whenever the window is resized. fn window_size_dependent_setup( memory_allocator: &StandardMemoryAllocator, vs: EntryPoint, fs: EntryPoint, images: &[Arc<Image>], render_pass: Arc<RenderPass>, ) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) { let extent = images[0].extent(); let depth_buffer = ImageView::new_default( Image::new( memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::D16_UNORM, extent: images[0].extent(), usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT | ImageUsage::TRANSIENT_ATTACHMENT, ..Default::default() }, AllocationCreateInfo::default(), ) .unwrap(), ) .unwrap(); let framebuffers = images .iter() .map(|image| { let view = ImageView::new_default(image.clone()).unwrap(); Framebuffer::new( render_pass.clone(), FramebufferCreateInfo { attachments: vec![view, depth_buffer.clone()], ..Default::default() }, ) .unwrap() }) .collect::<Vec<_>>(); // In the triangle example we use a dynamic viewport, as its a simple example. However in the // teapot example, we recreate the pipelines with a hardcoded viewport instead. This allows the // driver to optimize things, at the cost of slower window resizes. 
// https://computergraphics.stackexchange.com/questions/5742/vulkan-best-way-of-updating-pipeline-viewport let pipeline = { let device = memory_allocator.device(); let vertex_input_state = [Position::per_vertex(), Normal::per_vertex()] .definition(&vs.info().input_interface) .unwrap(); let stages = [ PipelineShaderStageCreateInfo::new(vs), PipelineShaderStageCreateInfo::new(fs), ]; let layout = PipelineLayout::new( device.clone(), PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages) .into_pipeline_layout_create_info(device.clone()) .unwrap(), ) .unwrap(); let subpass = Subpass::from(render_pass, 0).unwrap(); GraphicsPipeline::new( device.clone(), None, GraphicsPipelineCreateInfo { stages: stages.into_iter().collect(), vertex_input_state: Some(vertex_input_state), input_assembly_state: Some(InputAssemblyState::default()), viewport_state: Some(ViewportState::viewport_fixed_scissor_irrelevant([ Viewport { offset: [0.0, 0.0], extent: [extent[0] as f32, extent[1] as f32], depth_range: 0.0..=1.0, }, ])), rasterization_state: Some(RasterizationState::default()), depth_stencil_state: Some(DepthStencilState::simple_depth_test()), multisample_state: Some(MultisampleState::default()), color_blend_state: Some(ColorBlendState::new(subpass.num_color_attachments())), subpass: Some(subpass.into()), ..GraphicsPipelineCreateInfo::layout(layout) }, ) .unwrap() }; (pipeline, framebuffers) } mod vs { vulkano_shaders::shader! { ty: "vertex", path: "src/bin/teapot/vert.glsl", } } mod fs { vulkano_shaders::shader! { ty: "fragment", path: "src/bin/teapot/frag.glsl", } }
main
identifier_name
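The record ending here is easiest to read with the reassembly rule in mind: for an `identifier_name` sample the original file is `prefix + middle + suffix`, so the `fn` closing the prefix, the middle `main`, and the suffix beginning `() {` splice back into `fn main() { ... }` (whitespace lives inside the stored strings). A minimal Go sketch of that concatenation, assuming the row is already loaded into memory — the `fimRecord` struct and its field names are illustrative, not a published schema:

```go
package main

import "fmt"

// fimRecord mirrors one row of this dump; names are assumptions made for the sketch.
type fimRecord struct {
	FileName string
	Prefix   string
	Middle   string
	Suffix   string
	FimType  string // e.g. "identifier_name", "identifier_body", "conditional_block", "random_line_split"
}

// reconstruct rebuilds the original source text from a record.
func reconstruct(r fimRecord) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	r := fimRecord{
		FileName: "main.rs",
		Prefix:   "// ...\nfn ",      // prefix ends just before the masked span
		Middle:   "main",             // the masked identifier
		Suffix:   "() {\n    // ...\n}", // remainder of the file
		FimType:  "identifier_name",
	}
	fmt.Println(reconstruct(r)) // prints the stitched-together source
}
```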
main.rs
// Copyright (c) 2016 The vulkano developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, // at your option. All files in the project carrying such // notice may not be copied, modified, or distributed except // according to those terms. use cgmath::{Matrix3, Matrix4, Point3, Rad, Vector3}; use examples::{Normal, Position, INDICES, NORMALS, POSITIONS}; use std::{sync::Arc, time::Instant}; use vulkano::{ buffer::{ allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo}, Buffer, BufferCreateInfo, BufferUsage, }, command_buffer::{ allocator::StandardCommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferUsage, RenderPassBeginInfo, }, descriptor_set::{ allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet, }, device::{ physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceOwned, QueueCreateInfo, QueueFlags, }, format::Format, image::{view::ImageView, Image, ImageCreateInfo, ImageType, ImageUsage}, instance::{Instance, InstanceCreateFlags, InstanceCreateInfo}, memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::ColorBlendState, depth_stencil::DepthStencilState, input_assembly::InputAssemblyState, multisample::MultisampleState, rasterization::RasterizationState, vertex_input::{Vertex, VertexDefinition}, viewport::{Viewport, ViewportState}, GraphicsPipelineCreateInfo, }, layout::PipelineDescriptorSetLayoutCreateInfo, GraphicsPipeline, Pipeline, PipelineBindPoint, PipelineLayout, PipelineShaderStageCreateInfo, }, render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass}, shader::EntryPoint, swapchain::{ acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo, }, sync::{self, GpuFuture}, Validated, VulkanError, VulkanLibrary, }; use winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }; fn main() { // The start of this example is exactly the same as `triangle`. You should read the `triangle` // example if you haven't done so yet. 
let event_loop = EventLoop::new(); let library = VulkanLibrary::new().unwrap(); let required_extensions = Surface::required_extensions(&event_loop); let instance = Instance::new( library, InstanceCreateInfo { flags: InstanceCreateFlags::ENUMERATE_PORTABILITY, enabled_extensions: required_extensions, ..Default::default() }, ) .unwrap(); let window = Arc::new(WindowBuilder::new().build(&event_loop).unwrap()); let surface = Surface::from_window(instance.clone(), window.clone()).unwrap(); let device_extensions = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::empty() }; let (physical_device, queue_family_index) = instance .enumerate_physical_devices() .unwrap() .filter(|p| p.supported_extensions().contains(&device_extensions)) .filter_map(|p| { p.queue_family_properties() .iter() .enumerate() .position(|(i, q)| { q.queue_flags.intersects(QueueFlags::GRAPHICS) && p.surface_support(i as u32, &surface).unwrap_or(false) }) .map(|i| (p, i as u32)) }) .min_by_key(|(p, _)| match p.properties().device_type { PhysicalDeviceType::DiscreteGpu => 0, PhysicalDeviceType::IntegratedGpu => 1, PhysicalDeviceType::VirtualGpu => 2, PhysicalDeviceType::Cpu => 3, PhysicalDeviceType::Other => 4, _ => 5, }) .unwrap(); println!( "Using device: {} (type: {:?})", physical_device.properties().device_name, physical_device.properties().device_type, ); let (device, mut queues) = Device::new( physical_device, DeviceCreateInfo { enabled_extensions: device_extensions, queue_create_infos: vec![QueueCreateInfo { queue_family_index, ..Default::default() }], ..Default::default() }, ) .unwrap(); let queue = queues.next().unwrap(); let (mut swapchain, images) = { let surface_capabilities = device .physical_device() .surface_capabilities(&surface, Default::default()) .unwrap(); let image_format = device .physical_device() .surface_formats(&surface, Default::default()) .unwrap()[0] .0; Swapchain::new( device.clone(), surface, SwapchainCreateInfo { min_image_count: surface_capabilities.min_image_count.max(2), image_format, image_extent: window.inner_size().into(), image_usage: ImageUsage::COLOR_ATTACHMENT, composite_alpha: surface_capabilities .supported_composite_alpha .into_iter() .next() .unwrap(), ..Default::default() }, ) .unwrap() }; let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let vertex_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, POSITIONS, ) .unwrap(); let normals_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, NORMALS, ) .unwrap(); let index_buffer = Buffer::from_iter( &memory_allocator, BufferCreateInfo { usage: BufferUsage::INDEX_BUFFER, ..Default::default() }, AllocationCreateInfo { memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, INDICES, ) .unwrap(); let uniform_buffer = SubbufferAllocator::new( memory_allocator.clone(), SubbufferAllocatorCreateInfo { buffer_usage: BufferUsage::UNIFORM_BUFFER, memory_type_filter: MemoryTypeFilter::PREFER_DEVICE | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, ..Default::default() }, ); let render_pass = vulkano::single_pass_renderpass!( 
device.clone(), attachments: { color: { format: swapchain.image_format(), samples: 1, load_op: Clear, store_op: Store, }, depth_stencil: { format: Format::D16_UNORM, samples: 1, load_op: Clear, store_op: DontCare, }, }, pass: { color: [color], depth_stencil: {depth_stencil}, }, ) .unwrap(); let vs = vs::load(device.clone()) .unwrap() .entry_point("main") .unwrap(); let fs = fs::load(device.clone()) .unwrap() .entry_point("main") .unwrap(); let (mut pipeline, mut framebuffers) = window_size_dependent_setup( &memory_allocator, vs.clone(), fs.clone(), &images, render_pass.clone(), ); let mut recreate_swapchain = false; let mut previous_frame_end = Some(sync::now(device.clone()).boxed()); let rotation_start = Instant::now();
event_loop.run(move |event, _, control_flow| { match event { Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => { *control_flow = ControlFlow::Exit; } Event::WindowEvent { event: WindowEvent::Resized(_), .. } => { recreate_swapchain = true; } Event::RedrawEventsCleared => { let image_extent: [u32; 2] = window.inner_size().into(); if image_extent.contains(&0) { return; } previous_frame_end.as_mut().unwrap().cleanup_finished(); if recreate_swapchain { let (new_swapchain, new_images) = swapchain .recreate(SwapchainCreateInfo { image_extent, ..swapchain.create_info() }) .expect("failed to recreate swapchain"); swapchain = new_swapchain; let (new_pipeline, new_framebuffers) = window_size_dependent_setup( &memory_allocator, vs.clone(), fs.clone(), &new_images, render_pass.clone(), ); pipeline = new_pipeline; framebuffers = new_framebuffers; recreate_swapchain = false; } let uniform_buffer_subbuffer = { let elapsed = rotation_start.elapsed(); let rotation = elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000.0; let rotation = Matrix3::from_angle_y(Rad(rotation as f32)); // note: this teapot was meant for OpenGL where the origin is at the lower left // instead the origin is at the upper left in Vulkan, so we reverse the Y axis let aspect_ratio = swapchain.image_extent()[0] as f32 / swapchain.image_extent()[1] as f32; let proj = cgmath::perspective( Rad(std::f32::consts::FRAC_PI_2), aspect_ratio, 0.01, 100.0, ); let view = Matrix4::look_at_rh( Point3::new(0.3, 0.3, 1.0), Point3::new(0.0, 0.0, 0.0), Vector3::new(0.0, -1.0, 0.0), ); let scale = Matrix4::from_scale(0.01); let uniform_data = vs::Data { world: Matrix4::from(rotation).into(), view: (view * scale).into(), proj: proj.into(), }; let subbuffer = uniform_buffer.allocate_sized().unwrap(); *subbuffer.write().unwrap() = uniform_data; subbuffer }; let layout = pipeline.layout().set_layouts().get(0).unwrap(); let set = PersistentDescriptorSet::new( &descriptor_set_allocator, layout.clone(), [WriteDescriptorSet::buffer(0, uniform_buffer_subbuffer)], [], ) .unwrap(); let (image_index, suboptimal, acquire_future) = match acquire_next_image(swapchain.clone(), None).map_err(Validated::unwrap) { Ok(r) => r, Err(VulkanError::OutOfDate) => { recreate_swapchain = true; return; } Err(e) => panic!("failed to acquire next image: {e}"), }; if suboptimal { recreate_swapchain = true; } let mut builder = AutoCommandBufferBuilder::primary( &command_buffer_allocator, queue.queue_family_index(), CommandBufferUsage::OneTimeSubmit, ) .unwrap(); builder .begin_render_pass( RenderPassBeginInfo { clear_values: vec![ Some([0.0, 0.0, 1.0, 1.0].into()), Some(1f32.into()), ], ..RenderPassBeginInfo::framebuffer( framebuffers[image_index as usize].clone(), ) }, Default::default(), ) .unwrap() .bind_pipeline_graphics(pipeline.clone()) .unwrap() .bind_descriptor_sets( PipelineBindPoint::Graphics, pipeline.layout().clone(), 0, set, ) .unwrap() .bind_vertex_buffers(0, (vertex_buffer.clone(), normals_buffer.clone())) .unwrap() .bind_index_buffer(index_buffer.clone()) .unwrap() .draw_indexed(index_buffer.len() as u32, 1, 0, 0, 0) .unwrap() .end_render_pass(Default::default()) .unwrap(); let command_buffer = builder.build().unwrap(); let future = previous_frame_end .take() .unwrap() .join(acquire_future) .then_execute(queue.clone(), command_buffer) .unwrap() .then_swapchain_present( queue.clone(), SwapchainPresentInfo::swapchain_image_index(swapchain.clone(), image_index), ) .then_signal_fence_and_flush(); match future.map_err(Validated::unwrap) { 
Ok(future) => { previous_frame_end = Some(future.boxed()); } Err(VulkanError::OutOfDate) => { recreate_swapchain = true; previous_frame_end = Some(sync::now(device.clone()).boxed()); } Err(e) => { println!("failed to flush future: {e}"); previous_frame_end = Some(sync::now(device.clone()).boxed()); } } } _ => (), } }); } /// This function is called once during initialization, then again whenever the window is resized. fn window_size_dependent_setup( memory_allocator: &StandardMemoryAllocator, vs: EntryPoint, fs: EntryPoint, images: &[Arc<Image>], render_pass: Arc<RenderPass>, ) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) { let extent = images[0].extent(); let depth_buffer = ImageView::new_default( Image::new( memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::D16_UNORM, extent: images[0].extent(), usage: ImageUsage::DEPTH_STENCIL_ATTACHMENT | ImageUsage::TRANSIENT_ATTACHMENT, ..Default::default() }, AllocationCreateInfo::default(), ) .unwrap(), ) .unwrap(); let framebuffers = images .iter() .map(|image| { let view = ImageView::new_default(image.clone()).unwrap(); Framebuffer::new( render_pass.clone(), FramebufferCreateInfo { attachments: vec![view, depth_buffer.clone()], ..Default::default() }, ) .unwrap() }) .collect::<Vec<_>>(); // In the triangle example we use a dynamic viewport, as its a simple example. However in the // teapot example, we recreate the pipelines with a hardcoded viewport instead. This allows the // driver to optimize things, at the cost of slower window resizes. // https://computergraphics.stackexchange.com/questions/5742/vulkan-best-way-of-updating-pipeline-viewport let pipeline = { let device = memory_allocator.device(); let vertex_input_state = [Position::per_vertex(), Normal::per_vertex()] .definition(&vs.info().input_interface) .unwrap(); let stages = [ PipelineShaderStageCreateInfo::new(vs), PipelineShaderStageCreateInfo::new(fs), ]; let layout = PipelineLayout::new( device.clone(), PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages) .into_pipeline_layout_create_info(device.clone()) .unwrap(), ) .unwrap(); let subpass = Subpass::from(render_pass, 0).unwrap(); GraphicsPipeline::new( device.clone(), None, GraphicsPipelineCreateInfo { stages: stages.into_iter().collect(), vertex_input_state: Some(vertex_input_state), input_assembly_state: Some(InputAssemblyState::default()), viewport_state: Some(ViewportState::viewport_fixed_scissor_irrelevant([ Viewport { offset: [0.0, 0.0], extent: [extent[0] as f32, extent[1] as f32], depth_range: 0.0..=1.0, }, ])), rasterization_state: Some(RasterizationState::default()), depth_stencil_state: Some(DepthStencilState::simple_depth_test()), multisample_state: Some(MultisampleState::default()), color_blend_state: Some(ColorBlendState::new(subpass.num_color_attachments())), subpass: Some(subpass.into()), ..GraphicsPipelineCreateInfo::layout(layout) }, ) .unwrap() }; (pipeline, framebuffers) } mod vs { vulkano_shaders::shader! { ty: "vertex", path: "src/bin/teapot/vert.glsl", } } mod fs { vulkano_shaders::shader! { ty: "fragment", path: "src/bin/teapot/frag.glsl", } }
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default());
random_line_split
c8.go
package main import ( "fmt" "image/color" "log" "math/rand" "os" "strings" "time" "github.com/hajimehoshi/ebiten/v2" "github.com/hajimehoshi/ebiten/v2/audio" "github.com/hajimehoshi/ebiten/v2/ebitenutil" "github.com/hajimehoshi/ebiten/v2/inpututil" "github.com/hajimehoshi/ebiten/v2/text" "golang.org/x/image/font" "golang.org/x/image/font/opentype" ) const ( V_PIXELS = 32 H_PIXELS = 64 SCALE = 10 WIDTH = H_PIXELS * SCALE HEIGHT = V_PIXELS * SCALE BUTTON_WIDTH = 80 // Button width of Game Select UI BUTTON_HIGHT = 23 // Button height of Game Select UI SELECT_HIGHT = 45 // Title height of Game Select UI ) // A pixel in Chip8 console. type Pixel struct { x int y int enable bool } func (p *Pixel) image() *ebiten.Image { img := ebiten.NewImage(10, 10) if p.enable { img.Fill(color.White) } else { img.Fill(color.Black) } return img } func (p *Pixel) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(10*p.x), float64(10*p.y)) screen.DrawImage(p.image(), opts) } // Game main. type Chip8 struct { cpu *Cpu mem *Memory vme *VideoMemory audio *audio.Player kb *Keyboard } func (c8 *Chip8) Update() { c8.kb.Update() if len(c8.kb.queue) > 0 { keys := []string{} for _, key := range c8.kb.queue { keys = append(keys, fmt.Sprintf("%d", key)) } log.Printf("Unprocessed keys: %s", strings.Join(keys, " ")) } err := c8.cpu.Tick(c8.mem, c8.vme, c8.audio, c8.kb) if err != nil { log.Fatal(err) } } func (c8 *Chip8) Draw(screen *ebiten.Image) { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { xor := c8.vme.mem[x][y] ^ c8.vme.buf[x][y] if xor == 1 { pixel := Pixel{x, y, bytob(c8.vme.buf[x][y])} pixel.Draw(screen) } } } } func bytob(value byte) bool { if value == 1 { return true } else { return false } } type Keyboard struct { queue []uint16 } func (kb *Keyboard) Update() { // 0~9: 43~52 for _, key := range inpututil.PressedKeys() { if (key >= 43 && key <= 52) || (key >= 0 && key <= 5) { kb.queue = append(kb.queue, uint16(keytohex(key))) // log.Printf("keyPressed=%d \n", key) } } } func NewKeyboard() *Keyboard { kb := new(Keyboard) kb.queue = []uint16{} return kb } func (kb *Keyboard) Pop() *uint16 { len := len(kb.queue) if len > 0 { key := kb.queue[0] kb.queue = kb.queue[1:] return &key } else { return nil } } func (kb *Keyboard) Clear() { kb.queue = []uint16{} } func keytohex(key ebiten.Key) uint16 { if key >= 43 && key <= 52 { return uint16(key) - 43 } else { return uint16(key) + 0x10 } } type Cpu struct { v [64]uint8 i uint16 stack [16]uint16 sp uint16 pc uint16 dt uint16 st uint16 rnd *rand.Rand lastd time.Time lasts time.Time } func NewCpu() *Cpu { cpu := new(Cpu) cpu.pc = 0x200 cpu.rnd = rand.New(rand.NewSource(time.Now().UnixNano())) cpu.lastd = time.Now() cpu.lasts = time.Now() return cpu } func (cpu *Cpu) rand() uint8 { return uint8(cpu.rnd.Intn(256)) } func (cpu *Cpu) Tick(mem *Memory, vme *VideoMemory, audio *audio.Player, kb *Keyboard) error { o1 := mem.buf[cpu.pc] >> 4 o2 := mem.buf[cpu.pc] & 0x0F o3 := mem.buf[cpu.pc+1] >> 4 o4 := mem.buf[cpu.pc+1] & 0x0F opcode := fmt.Sprintf("%02X%02X%02X%02X", o1, o2, o3, o4) log.Printf("Tick sp=%d pc=%d dt=%d st=%d opcode=%s", cpu.sp, cpu.pc, cpu.dt, cpu.st, opcode) nnn := (uint16(o2) << 8) + (uint16(o3) << 4) + uint16(o4) kk := (uint8(o3) << 4) + uint8(o4) x := o2 y := o3 vx := uint16(cpu.v[o2]) vy := uint16(cpu.v[o3]) xy := vx + vy var cmd Command switch o1 { case 0x0: switch o2 { case 0x0: switch o3 { case 0xE: switch o4 { case 0x0: log.Println("CLS") vme.clear() cmd = Next{} case 0xE: log.Println("00EE RET") pc 
:= cpu.stack[cpu.sp-1] cpu.sp -= 1 cmd = Jump{pc + 2} } } default: log.Println("SYS addr") cmd = Jump{nnn} } case 0x1: log.Println("1nnn JP") cmd = Jump{nnn} case 0x2: log.Println("2nnn CALL") cpu.stack[cpu.sp] = cpu.pc cpu.sp += 1 cmd = Jump{nnn} case 0x3: log.Println("3xkk SE") if vx == uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x4: log.Println("4xkk SNE") if vx != uint16(kk)
else { cmd = Next{} } case 0x5: log.Println("5xy0 - SE") if vx == vy { cmd = Skip{} } else { cmd = Next{} } case 0x6: log.Println("6xkk - LD") cpu.v[x] = kk cmd = Next{} case 0x7: log.Println("7xkk - ADD") cpu.v[x] += kk cmd = Next{} case 0x8: switch o4 { case 0x0: log.Println("8xk0 - LD Vx, Vy") cpu.v[x] = cpu.v[y] case 0x1: log.Println("8xk1 - OR Vx, Vy") cpu.v[x] |= cpu.v[y] case 0x2: log.Println("8xk2 - AND Vx, Vy") cpu.v[x] &= cpu.v[y] case 0x3: log.Println("8xk3 - XOR Vx, Vy") cpu.v[x] ^= cpu.v[y] case 0x4: log.Println("8xk4 - ADD Vx, Vy") if xy > 0xFF { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(xy & 0xFF) case 0x5: log.Println("8xk5 - SUB Vx, Vy") if vx > vy { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vx - vy) case 0x6: log.Println("8xk6 - SHR Vx, Vy") cpu.v[0xF] = uint8(vx & 0x1) cpu.v[x] /= 2 case 0x7: log.Println("8xk7 - SUBN Vx, Vy") if vy > vx { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vy - vx) case 0xE: log.Println("8xkE - SHL Vx, Vy") cpu.v[0xF] = cpu.v[x] >> 7 cpu.v[x] *= 2 } cmd = Next{} case 0x9: log.Println("9xy0 - SNE") if vx != vy { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("Annn - LD I") cpu.i = nnn cmd = Next{} case 0xB: log.Println("Bnnn - JP") cmd = Jump{nnn + uint16(cpu.v[0])} case 0xC: log.Println("Cxkk - RND") cpu.v[x] = cpu.rand() & kk cmd = Next{} case 0xD: log.Println("DRW - Vx, Vy, nibble") n := o4 bytes := mem.buf[cpu.i : cpu.i+uint16(n)] cpu.v[0xF] = vme.draw(vx, vy, bytes) cmd = Next{} case 0xE: switch o3 { case 0x9: log.Println("Ex9E - SKP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if pressed { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("ExA1 - SKNP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if !pressed { cmd = Skip{} } else { cmd = Next{} } } case 0xF: switch o3 { case 0x0: switch o4 { case 0x7: log.Println("Fx07 - LD Vx, DT") cpu.v[x] = uint8(cpu.dt) cmd = Next{} case 0xA: log.Println("Fx0A - LD Vx, K") key := kb.Pop() if key != nil { cpu.v[x] = uint8(*key) cmd = Next{} } else { // Do nothing. } } case 0x1: switch o4 { case 0x5: log.Println("Fx15 - LD DT") cpu.dt = vx cpu.lastd = time.Now() cmd = Next{} case 0x8: log.Println("Fx18 - LD ST") cpu.st = vx cpu.lasts = time.Now() cmd = Next{} case 0xE: log.Println("Fx1E - ADD I Vx") cpu.i += vx cmd = Next{} } case 0x2: log.Println("Fx29 - LD F") cpu.i = vx * 5 cmd = Next{} case 0x3: log.Println("Fx33 - LD B") mem.buf[cpu.i] = (uint8(vx) / 100) % 10 mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10 mem.buf[cpu.i+2] = uint8(vx) % 10 cmd = Next{} case 0x5: log.Println("Fx55 - LD [I]") for n := 0; n <= int(x); n++ { mem.buf[cpu.i+uint16(n)] = cpu.v[n] } cmd = Next{} case 0x6: log.Println("Fx65 - LD") for n := 0; n <= int(x); n++ { cpu.v[n] = mem.buf[cpu.i+uint16(n)] } cmd = Next{} } } if cmd != nil { cmd.exec(cpu) } now := time.Now() elapsed := now.Sub(cpu.lastd) if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 { cpu.dt -= 1 cpu.lastd = now } elapsed = now.Sub(cpu.lasts) if elapsed.Seconds() > 1.0/60 && cpu.st > 0 { audio.Play() audio.Rewind() cpu.st -= 1 cpu.lasts = now } return nil } type Command interface { exec(cpu *Cpu) } type Next struct{} func (c Next) exec(cpu *Cpu) { cpu.pc += 2 } type Jump struct { addr uint16 } func (c Jump) exec(cpu *Cpu) { cpu.pc = c.addr } type Skip struct{} func (c Skip) exec(cpu *Cpu) { cpu.pc += 4 } type Memory struct { buf [0xFFF]byte // Chip-8 has 0xFFFF (4096) bytes of RAM. 
} func (m *Memory) Load(path string) error { f, err := os.Open(path) if err != nil { return err } n, err := f.Read(m.buf[0x200:]) log.Printf("%d bytes read from \"%s\".", n, path) return nil } func NewMemory() *Memory { m := new(Memory) // Load fontsets. m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x10, 0xF0, 0xF0, 0x90, 0xF0, 0x90, 0x90, 0xE0, 0x90, 0xE0, 0x90, 0xE0, 0xF0, 0x80, 0x80, 0x80, 0xF0, 0xE0, 0x90, 0x90, 0x90, 0xE0, 0xF0, 0x80, 0xF0, 0x80, 0xF0, 0xF0, 0x80, 0xF0, 0x80, 0x80} return m } // VideoMemory implements double buffer. type VideoMemory struct { buf [H_PIXELS][V_PIXELS]byte mem [H_PIXELS][V_PIXELS]byte } func NewVideoMemory() *VideoMemory { return new(VideoMemory) } func (vme *VideoMemory) clear() { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { vme.buf[x][y] = 0 } } } func (vme *VideoMemory) draw(x uint16, y uint16, buf []byte) uint8 { vf := uint16(0) for i, byte := range buf { i := uint16(i) vf += vme.draw_pixcel(x, y+i, (byte>>7)&0x1) vf += vme.draw_pixcel(x+1, y+i, (byte>>6)&0x1) vf += vme.draw_pixcel(x+2, y+i, (byte>>5)&0x1) vf += vme.draw_pixcel(x+3, y+i, (byte>>4)&0x1) vf += vme.draw_pixcel(x+4, y+i, (byte>>3)&0x1) vf += vme.draw_pixcel(x+5, y+i, (byte>>2)&0x1) vf += vme.draw_pixcel(x+6, y+i, (byte>>1)&0x1) vf += vme.draw_pixcel(x+7, y+i, (byte>>0)&0x1) } if vf > 0 { return 1 } else { return 0 } } func (vme *VideoMemory) draw_pixcel(x uint16, y uint16, new byte) uint16 { var vf uint16 // Check collision. if vme.buf[x][y] == 1 && new == 1 { vf = 1 } else { vf = 0 } vme.buf[x][y] ^= new return vf } type Button struct { text string img *ebiten.Image x int y int onclicked func(*Button) font *font.Face rom Rom } func NewButton(text string, font *font.Face, x, y int, rom Rom, onclicked func(*Button)) *Button { btn := new(Button) img := ebiten.NewImage(BUTTON_WIDTH-1, BUTTON_HIGHT-1) img.Fill(color.White) btn.img = img btn.text = text btn.font = font btn.x = x btn.y = y btn.rom = rom btn.onclicked = onclicked return btn } func (btn *Button) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(btn.x*BUTTON_WIDTH), float64(btn.y*BUTTON_HIGHT)+float64(SELECT_HIGHT)) screen.DrawImage(btn.img, opts) text.Draw(screen, btn.text, *btn.font, btn.x*BUTTON_WIDTH+10, btn.y*BUTTON_HIGHT+17+SELECT_HIGHT, color.Black) } type UI struct { btns []*Button oncompleted func(rom Rom) font *font.Face } func (ui *UI) Draw(screen *ebiten.Image) { text.Draw(screen, "SELECT A GAME", *ui.font, 160, 36, color.White) for _, btn := range ui.btns { btn.Draw(screen) } } func (ui *UI) Update() { clicked := ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) x, y := ebiten.CursorPosition() if clicked { log.Printf("Clicked: %v on (%d, %d)", clicked, x, y) for _, btn := range ui.btns { minx := btn.img.Bounds().Min.X + btn.x*BUTTON_WIDTH maxx := btn.img.Bounds().Max.X + btn.x*BUTTON_WIDTH miny := btn.img.Bounds().Min.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT maxy := btn.img.Bounds().Max.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT log.Printf("x=%d y=%d minx=%d maxx=%d miny=%d maxy=%d", x, y, minx, maxx, miny, maxy) if x >= minx && x <= maxx && y >= miny && y <= maxy { btn.onclicked(btn) } } } } type Rom struct { name string path string } func NewUI() *UI { ROMS := [90]Rom{ {"15Puzzle", "roms/15 Puzzle 
[Roger Ivie].ch8"}, {"Addition Problems", "roms/Addition Problems [Paul C. Moews].ch8"}, {"Airplane", "roms/Airplane.ch8"}, {"Animal Race", "roms/Animal Race [Brian Astle].ch8"}, {"Astro Dodge", "roms/Astro Dodge [Revival Studios, 2008].ch8"}, {"BMP Viewer", "roms/BMP Viewer - Hello (C8 example) [Hap, 2005].ch8"}, {"Biorhythm ", "roms/Biorhythm [Jef Winsor].ch8"}, {"Blinky", "roms/Blinky [Hans Christian Egeberg, 1991].ch8"}, {"Blitz ", "roms/Blitz [David Winter].ch8"}, {"Bowling", "roms/Bowling [Gooitzen van der Wal].ch8"}, {"Breakout", "roms/Breakout (Brix hack) [David Winter, 1997].ch8"}, {"Brick", "roms/Brick (Brix hack, 1990).ch8"}, {"Brix", "roms/Brix [Andreas Gustafsson, 1990].ch8"}, {"Cave", "roms/Cave.ch8"}, {"Chip8 Picture", "roms/Chip8 Picture.ch8"}, {"Chip8 Logo", "roms/Chip8 emulator Logo [Garstyciuks].ch8"}, {"Clock Program", "roms/Clock Program [Bill Fisher, 1981].ch8"}, {"Coin Flipping", "roms/Coin Flipping [Carmelo Cortez, 1978].ch8"}, {"Connect 4", "roms/Connect 4 [David Winter].ch8"}, {"Craps", "roms/Craps [Camerlo Cortez, 1978].ch8"}, {"Deflection", "roms/Deflection [John Fort].ch8"}, {"Delay Timer Test", "roms/Delay Timer Test [Matthew Mikolay, 2010].ch8"}, {"Division Test", "roms/Division Test [Sergey Naydenov, 2010].ch8"}, {"Figures", "roms/Figures.ch8"}, {"Filter", "roms/Filter.ch8"}, {"Fishie", "roms/Fishie [Hap, 2005].ch8"}, {"Framed", "roms/Framed MK1 [GV Samways, 1980].ch8"}, {"Framed2", "roms/Framed MK2 [GV Samways, 1980].ch8"}, {"Guess", "roms/Guess [David Winter].ch8"}, {"Hi-Lo", "roms/Hi-Lo [Jef Winsor, 1978].ch8"}, {"Hidden", "roms/Hidden [David Winter, 1996].ch8"}, {"IBM Logo", "roms/IBM Logo.ch8"}, {"Jumping X", "roms/Jumping X and O [Harry Kleinberg, 1977].ch8"}, {"Kaleidoscope", "roms/Kaleidoscope [Joseph Weisbecker, 1978].ch8"}, {"Keypad Test", "roms/Keypad Test [Hap, 2006].ch8"}, {"Landing", "roms/Landing.ch8"}, {"Life", "roms/Life [GV Samways, 1980].ch8"}, {"Lunar Lander", "roms/Lunar Lander (Udo Pernisz, 1979).ch8"}, {"Mastermind FourRow", "roms/Mastermind FourRow (Robert Lindley, 1978).ch8"}, {"Maze", "roms/Maze [David Winter, 199x].ch8"}, {"Merlin", "roms/Merlin [David Winter].ch8"}, {"Minimal", "roms/Minimal game [Revival Studios, 2007].ch8"}, {"Missile", "roms/Missile [David Winter].ch8"}, {"Most", "roms/Most Dangerous Game [Peter Maruhnic].ch8"}, {"Nim ", "roms/Nim [Carmelo Cortez, 1978].ch8"}, {"Paddles", "roms/Paddles.ch8"}, {"Particle ", "roms/Particle Demo [zeroZshadow, 2008].ch8"}, {"Pong", "roms/Pong (1 player).ch8"}, {"Pong 2", "roms/Pong 2 (Pong hack) [David Winter, 1997].ch8"}, {"Pong 3", "roms/Pong [Paul Vervalin, 1990].ch8"}, {"Programmable Spacefighters", "roms/Programmable Spacefighters [Jef Winsor].ch8"}, {"Puzzle", "roms/Puzzle.ch8"}, {"Random Number", "roms/Random Number Test [Matthew Mikolay, 2010].ch8"}, {"Reversi", "roms/Reversi [Philip Baltzer].ch8"}, {"Rocket Launch", "roms/Rocket Launch [Jonas Lindstedt].ch8"}, {"Rocket Launcher", "roms/Rocket Launcher.ch8"}, {"Rocket ", "roms/Rocket [Joseph Weisbecker, 1978].ch8"}, {"Rush Hour", "roms/Rush Hour [Hap, 2006].ch8"}, {"Russian Roulette", "roms/Russian Roulette [Carmelo Cortez, 1978].ch8"}, {"SQRT Test", "roms/SQRT Test [Sergey Naydenov, 2010].ch8"}, {"Sequence Shoot", "roms/Sequence Shoot [Joyce Weisbecker].ch8"}, {"Shooting Stars", "roms/Shooting Stars [Philip Baltzer, 1978].ch8"}, {"Sierpinski", "roms/Sierpinski [Sergey Naydenov, 2010].ch8"}, {"Slide ", "roms/Slide [Joyce Weisbecker].ch8"}, {"Soccer", "roms/Soccer.ch8"}, {"Space Flight", "roms/Space Flight.ch8"}, {"Space 
Intercept", "roms/Space Intercept [Joseph Weisbecker, 1978].ch8"}, {"Space Invaders", "roms/Space Invaders [David Winter].ch8"}, {"Spooky Spot", "roms/Spooky Spot [Joseph Weisbecker, 1978].ch8"}, {"Squash", "roms/Squash [David Winter].ch8"}, {"Stars", "roms/Stars [Sergey Naydenov, 2010].ch8"}, {"Submarine", "roms/Submarine [Carmelo Cortez, 1978].ch8"}, {"Sum Fun", "roms/Sum Fun [Joyce Weisbecker].ch8"}, {"Syzygy", "roms/Syzygy [Roy Trevino, 1990].ch8"}, {"Tank", "roms/Tank.ch8"}, {"Tapeworm", "roms/Tapeworm [JDR, 1999].ch8"}, {"Tetris", "roms/Tetris [Fran Dachille, 1991].ch8"}, {"Tic-Tac-Toe", "roms/Tic-Tac-Toe [David Winter].ch8"}, {"Timebomb", "roms/Timebomb.ch8"}, {"Trip8 Demo", "roms/Trip8 Demo (2008) [Revival Studios].ch8"}, {"Tron", "roms/Tron.ch8"}, {"UFO", "roms/UFO [Lutz V, 1992].ch8"}, {"Vers", "roms/Vers [JMN, 1991].ch8"}, {"Vertical Brix", "roms/Vertical Brix [Paul Robson, 1996].ch8"}, {"Wall", "roms/Wall [David Winter].ch8"}, {"Wipe Off", "roms/Wipe Off [Joseph Weisbecker].ch8"}, {"Worm V4", "roms/Worm V4 [RB-Revival Studios, 2007].ch8"}, {"X-Mirror", "roms/X-Mirror.ch8"}, {"Zero Demo", "roms/Zero Demo [zeroZshadow, 2007].ch8"}, {"ZeroPong ", "roms/ZeroPong [zeroZshadow, 2007].ch8"}, } ui := new(UI) tt, err := opentype.Parse(PressStart2P_ttf) if err != nil { log.Fatal(err) } titleFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 24, DPI: 72, Hinting: font.HintingFull, }) btnFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 7, DPI: 72, Hinting: font.HintingFull, }) ui.font = &titleFont cb := func(btn *Button) { log.Printf("button %s was clicked!", btn.text) if ui.oncompleted != nil { ui.oncompleted(btn.rom) } } for n, rom := range ROMS { x := n % 8 y := n / 8 ui.btns = append(ui.btns, NewButton(rom.name, &btnFont, x, y, rom, cb)) } return ui } // Workaround to create a variable to receive both UI and Chip8 object. type Scene interface { Draw(screen *ebiten.Image) Update() } type Game struct { scene Scene } func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) { return 640, 320 } func (g *Game) Draw(screen *ebiten.Image) { g.scene.Draw(screen) ebitenutil.DebugPrint(screen, fmt.Sprintf("%f", ebiten.CurrentTPS())) } func (g *Game) Update() error { g.scene.Update() return nil } func main() { ebiten.SetMaxTPS(800) ebiten.SetWindowSize(640, 320) ebiten.SetWindowTitle("CHIP-8") cpu := NewCpu() mem := NewMemory() vme := NewVideoMemory() f, err := os.Open("audio.mp3") if err != nil { log.Fatal(err) } audio, err := audio.NewPlayer(audio.NewContext(32000), f) if err != nil { log.Fatal(err) } log.Printf("%+v", mem) kb := NewKeyboard() ui := NewUI() c8 := Chip8{cpu, mem, vme, audio, kb} game := Game{ui} ui.oncompleted = func(rom Rom) { game.scene = &c8 err := c8.mem.Load(rom.path) if err != nil { panic(err) } } if err := ebiten.RunGame(&game); err != nil { log.Fatal(err) } }
{ cmd = Skip{} }
conditional_block
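The `conditional_block` middle above (`{ cmd = Skip{} }`) is the taken branch of the 4xkk SNE opcode, and the surrounding `Tick` works by slicing each 16-bit CHIP-8 instruction into nibbles and derived fields (nnn, kk, x, y). Below is a stripped-down sketch of just that decode step, kept separate from the emulator types in the record; the `decode` helper and `opcodeFields` struct are named here for illustration only:

```go
package main

import "fmt"

// opcodeFields holds the pieces a CHIP-8 interpreter typically needs per instruction.
type opcodeFields struct {
	o1, o2, o3, o4 uint8  // the four nibbles, high to low
	nnn            uint16 // lowest 12 bits: an address
	kk             uint8  // lowest 8 bits: an immediate byte
	x, y           uint8  // register indices from the 2nd and 3rd nibbles
}

// decode splits a two-byte instruction (hi byte at pc, lo byte at pc+1) into its
// fields, mirroring the bit arithmetic used in Cpu.Tick above.
func decode(hi, lo byte) opcodeFields {
	f := opcodeFields{
		o1: hi >> 4,
		o2: hi & 0x0F,
		o3: lo >> 4,
		o4: lo & 0x0F,
	}
	f.nnn = uint16(f.o2)<<8 | uint16(f.o3)<<4 | uint16(f.o4)
	f.kk = f.o3<<4 | f.o4
	f.x, f.y = f.o2, f.o3
	return f
}

func main() {
	// 0x3A42 is "3xkk SE Vx, kk": skip the next instruction if V[A] == 0x42.
	f := decode(0x3A, 0x42)
	fmt.Printf("op=%X x=%X kk=%02X nnn=%03X\n", f.o1, f.x, f.kk, f.nnn)
	// op=3 x=A kk=42 nnn=A42
}
```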
c8.go
package main import ( "fmt" "image/color" "log" "math/rand" "os" "strings" "time" "github.com/hajimehoshi/ebiten/v2" "github.com/hajimehoshi/ebiten/v2/audio" "github.com/hajimehoshi/ebiten/v2/ebitenutil" "github.com/hajimehoshi/ebiten/v2/inpututil" "github.com/hajimehoshi/ebiten/v2/text" "golang.org/x/image/font" "golang.org/x/image/font/opentype" ) const ( V_PIXELS = 32 H_PIXELS = 64 SCALE = 10 WIDTH = H_PIXELS * SCALE HEIGHT = V_PIXELS * SCALE BUTTON_WIDTH = 80 // Button width of Game Select UI BUTTON_HIGHT = 23 // Button height of Game Select UI SELECT_HIGHT = 45 // Title height of Game Select UI ) // A pixel in Chip8 console. type Pixel struct { x int y int enable bool } func (p *Pixel) image() *ebiten.Image { img := ebiten.NewImage(10, 10) if p.enable { img.Fill(color.White) } else { img.Fill(color.Black) } return img } func (p *Pixel) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(10*p.x), float64(10*p.y)) screen.DrawImage(p.image(), opts) } // Game main. type Chip8 struct { cpu *Cpu mem *Memory vme *VideoMemory audio *audio.Player kb *Keyboard } func (c8 *Chip8) Update() { c8.kb.Update() if len(c8.kb.queue) > 0 { keys := []string{} for _, key := range c8.kb.queue { keys = append(keys, fmt.Sprintf("%d", key)) } log.Printf("Unprocessed keys: %s", strings.Join(keys, " ")) } err := c8.cpu.Tick(c8.mem, c8.vme, c8.audio, c8.kb) if err != nil { log.Fatal(err) } } func (c8 *Chip8) Draw(screen *ebiten.Image) { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { xor := c8.vme.mem[x][y] ^ c8.vme.buf[x][y] if xor == 1 { pixel := Pixel{x, y, bytob(c8.vme.buf[x][y])} pixel.Draw(screen) } } } } func bytob(value byte) bool { if value == 1 { return true } else { return false } } type Keyboard struct { queue []uint16 } func (kb *Keyboard) Update() { // 0~9: 43~52 for _, key := range inpututil.PressedKeys() { if (key >= 43 && key <= 52) || (key >= 0 && key <= 5) { kb.queue = append(kb.queue, uint16(keytohex(key))) // log.Printf("keyPressed=%d \n", key) } } } func NewKeyboard() *Keyboard { kb := new(Keyboard) kb.queue = []uint16{} return kb } func (kb *Keyboard) Pop() *uint16 { len := len(kb.queue) if len > 0 { key := kb.queue[0] kb.queue = kb.queue[1:] return &key } else { return nil } } func (kb *Keyboard) Clear()
func keytohex(key ebiten.Key) uint16 { if key >= 43 && key <= 52 { return uint16(key) - 43 } else { return uint16(key) + 0x10 } } type Cpu struct { v [64]uint8 i uint16 stack [16]uint16 sp uint16 pc uint16 dt uint16 st uint16 rnd *rand.Rand lastd time.Time lasts time.Time } func NewCpu() *Cpu { cpu := new(Cpu) cpu.pc = 0x200 cpu.rnd = rand.New(rand.NewSource(time.Now().UnixNano())) cpu.lastd = time.Now() cpu.lasts = time.Now() return cpu } func (cpu *Cpu) rand() uint8 { return uint8(cpu.rnd.Intn(256)) } func (cpu *Cpu) Tick(mem *Memory, vme *VideoMemory, audio *audio.Player, kb *Keyboard) error { o1 := mem.buf[cpu.pc] >> 4 o2 := mem.buf[cpu.pc] & 0x0F o3 := mem.buf[cpu.pc+1] >> 4 o4 := mem.buf[cpu.pc+1] & 0x0F opcode := fmt.Sprintf("%02X%02X%02X%02X", o1, o2, o3, o4) log.Printf("Tick sp=%d pc=%d dt=%d st=%d opcode=%s", cpu.sp, cpu.pc, cpu.dt, cpu.st, opcode) nnn := (uint16(o2) << 8) + (uint16(o3) << 4) + uint16(o4) kk := (uint8(o3) << 4) + uint8(o4) x := o2 y := o3 vx := uint16(cpu.v[o2]) vy := uint16(cpu.v[o3]) xy := vx + vy var cmd Command switch o1 { case 0x0: switch o2 { case 0x0: switch o3 { case 0xE: switch o4 { case 0x0: log.Println("CLS") vme.clear() cmd = Next{} case 0xE: log.Println("00EE RET") pc := cpu.stack[cpu.sp-1] cpu.sp -= 1 cmd = Jump{pc + 2} } } default: log.Println("SYS addr") cmd = Jump{nnn} } case 0x1: log.Println("1nnn JP") cmd = Jump{nnn} case 0x2: log.Println("2nnn CALL") cpu.stack[cpu.sp] = cpu.pc cpu.sp += 1 cmd = Jump{nnn} case 0x3: log.Println("3xkk SE") if vx == uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x4: log.Println("4xkk SNE") if vx != uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x5: log.Println("5xy0 - SE") if vx == vy { cmd = Skip{} } else { cmd = Next{} } case 0x6: log.Println("6xkk - LD") cpu.v[x] = kk cmd = Next{} case 0x7: log.Println("7xkk - ADD") cpu.v[x] += kk cmd = Next{} case 0x8: switch o4 { case 0x0: log.Println("8xk0 - LD Vx, Vy") cpu.v[x] = cpu.v[y] case 0x1: log.Println("8xk1 - OR Vx, Vy") cpu.v[x] |= cpu.v[y] case 0x2: log.Println("8xk2 - AND Vx, Vy") cpu.v[x] &= cpu.v[y] case 0x3: log.Println("8xk3 - XOR Vx, Vy") cpu.v[x] ^= cpu.v[y] case 0x4: log.Println("8xk4 - ADD Vx, Vy") if xy > 0xFF { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(xy & 0xFF) case 0x5: log.Println("8xk5 - SUB Vx, Vy") if vx > vy { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vx - vy) case 0x6: log.Println("8xk6 - SHR Vx, Vy") cpu.v[0xF] = uint8(vx & 0x1) cpu.v[x] /= 2 case 0x7: log.Println("8xk7 - SUBN Vx, Vy") if vy > vx { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vy - vx) case 0xE: log.Println("8xkE - SHL Vx, Vy") cpu.v[0xF] = cpu.v[x] >> 7 cpu.v[x] *= 2 } cmd = Next{} case 0x9: log.Println("9xy0 - SNE") if vx != vy { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("Annn - LD I") cpu.i = nnn cmd = Next{} case 0xB: log.Println("Bnnn - JP") cmd = Jump{nnn + uint16(cpu.v[0])} case 0xC: log.Println("Cxkk - RND") cpu.v[x] = cpu.rand() & kk cmd = Next{} case 0xD: log.Println("DRW - Vx, Vy, nibble") n := o4 bytes := mem.buf[cpu.i : cpu.i+uint16(n)] cpu.v[0xF] = vme.draw(vx, vy, bytes) cmd = Next{} case 0xE: switch o3 { case 0x9: log.Println("Ex9E - SKP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if pressed { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("ExA1 - SKNP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if !pressed { cmd = Skip{} } else { cmd = Next{} } } 
case 0xF: switch o3 { case 0x0: switch o4 { case 0x7: log.Println("Fx07 - LD Vx, DT") cpu.v[x] = uint8(cpu.dt) cmd = Next{} case 0xA: log.Println("Fx0A - LD Vx, K") key := kb.Pop() if key != nil { cpu.v[x] = uint8(*key) cmd = Next{} } else { // Do nothing. } } case 0x1: switch o4 { case 0x5: log.Println("Fx15 - LD DT") cpu.dt = vx cpu.lastd = time.Now() cmd = Next{} case 0x8: log.Println("Fx18 - LD ST") cpu.st = vx cpu.lasts = time.Now() cmd = Next{} case 0xE: log.Println("Fx1E - ADD I Vx") cpu.i += vx cmd = Next{} } case 0x2: log.Println("Fx29 - LD F") cpu.i = vx * 5 cmd = Next{} case 0x3: log.Println("Fx33 - LD B") mem.buf[cpu.i] = (uint8(vx) / 100) % 10 mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10 mem.buf[cpu.i+2] = uint8(vx) % 10 cmd = Next{} case 0x5: log.Println("Fx55 - LD [I]") for n := 0; n <= int(x); n++ { mem.buf[cpu.i+uint16(n)] = cpu.v[n] } cmd = Next{} case 0x6: log.Println("Fx65 - LD") for n := 0; n <= int(x); n++ { cpu.v[n] = mem.buf[cpu.i+uint16(n)] } cmd = Next{} } } if cmd != nil { cmd.exec(cpu) } now := time.Now() elapsed := now.Sub(cpu.lastd) if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 { cpu.dt -= 1 cpu.lastd = now } elapsed = now.Sub(cpu.lasts) if elapsed.Seconds() > 1.0/60 && cpu.st > 0 { audio.Play() audio.Rewind() cpu.st -= 1 cpu.lasts = now } return nil } type Command interface { exec(cpu *Cpu) } type Next struct{} func (c Next) exec(cpu *Cpu) { cpu.pc += 2 } type Jump struct { addr uint16 } func (c Jump) exec(cpu *Cpu) { cpu.pc = c.addr } type Skip struct{} func (c Skip) exec(cpu *Cpu) { cpu.pc += 4 } type Memory struct { buf [0xFFF]byte // Chip-8 has 0xFFFF (4096) bytes of RAM. } func (m *Memory) Load(path string) error { f, err := os.Open(path) if err != nil { return err } n, err := f.Read(m.buf[0x200:]) log.Printf("%d bytes read from \"%s\".", n, path) return nil } func NewMemory() *Memory { m := new(Memory) // Load fontsets. m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x10, 0xF0, 0xF0, 0x90, 0xF0, 0x90, 0x90, 0xE0, 0x90, 0xE0, 0x90, 0xE0, 0xF0, 0x80, 0x80, 0x80, 0xF0, 0xE0, 0x90, 0x90, 0x90, 0xE0, 0xF0, 0x80, 0xF0, 0x80, 0xF0, 0xF0, 0x80, 0xF0, 0x80, 0x80} return m } // VideoMemory implements double buffer. type VideoMemory struct { buf [H_PIXELS][V_PIXELS]byte mem [H_PIXELS][V_PIXELS]byte } func NewVideoMemory() *VideoMemory { return new(VideoMemory) } func (vme *VideoMemory) clear() { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { vme.buf[x][y] = 0 } } } func (vme *VideoMemory) draw(x uint16, y uint16, buf []byte) uint8 { vf := uint16(0) for i, byte := range buf { i := uint16(i) vf += vme.draw_pixcel(x, y+i, (byte>>7)&0x1) vf += vme.draw_pixcel(x+1, y+i, (byte>>6)&0x1) vf += vme.draw_pixcel(x+2, y+i, (byte>>5)&0x1) vf += vme.draw_pixcel(x+3, y+i, (byte>>4)&0x1) vf += vme.draw_pixcel(x+4, y+i, (byte>>3)&0x1) vf += vme.draw_pixcel(x+5, y+i, (byte>>2)&0x1) vf += vme.draw_pixcel(x+6, y+i, (byte>>1)&0x1) vf += vme.draw_pixcel(x+7, y+i, (byte>>0)&0x1) } if vf > 0 { return 1 } else { return 0 } } func (vme *VideoMemory) draw_pixcel(x uint16, y uint16, new byte) uint16 { var vf uint16 // Check collision. 
if vme.buf[x][y] == 1 && new == 1 { vf = 1 } else { vf = 0 } vme.buf[x][y] ^= new return vf } type Button struct { text string img *ebiten.Image x int y int onclicked func(*Button) font *font.Face rom Rom } func NewButton(text string, font *font.Face, x, y int, rom Rom, onclicked func(*Button)) *Button { btn := new(Button) img := ebiten.NewImage(BUTTON_WIDTH-1, BUTTON_HIGHT-1) img.Fill(color.White) btn.img = img btn.text = text btn.font = font btn.x = x btn.y = y btn.rom = rom btn.onclicked = onclicked return btn } func (btn *Button) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(btn.x*BUTTON_WIDTH), float64(btn.y*BUTTON_HIGHT)+float64(SELECT_HIGHT)) screen.DrawImage(btn.img, opts) text.Draw(screen, btn.text, *btn.font, btn.x*BUTTON_WIDTH+10, btn.y*BUTTON_HIGHT+17+SELECT_HIGHT, color.Black) } type UI struct { btns []*Button oncompleted func(rom Rom) font *font.Face } func (ui *UI) Draw(screen *ebiten.Image) { text.Draw(screen, "SELECT A GAME", *ui.font, 160, 36, color.White) for _, btn := range ui.btns { btn.Draw(screen) } } func (ui *UI) Update() { clicked := ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) x, y := ebiten.CursorPosition() if clicked { log.Printf("Clicked: %v on (%d, %d)", clicked, x, y) for _, btn := range ui.btns { minx := btn.img.Bounds().Min.X + btn.x*BUTTON_WIDTH maxx := btn.img.Bounds().Max.X + btn.x*BUTTON_WIDTH miny := btn.img.Bounds().Min.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT maxy := btn.img.Bounds().Max.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT log.Printf("x=%d y=%d minx=%d maxx=%d miny=%d maxy=%d", x, y, minx, maxx, miny, maxy) if x >= minx && x <= maxx && y >= miny && y <= maxy { btn.onclicked(btn) } } } } type Rom struct { name string path string } func NewUI() *UI { ROMS := [90]Rom{ {"15Puzzle", "roms/15 Puzzle [Roger Ivie].ch8"}, {"Addition Problems", "roms/Addition Problems [Paul C. 
Moews].ch8"}, {"Airplane", "roms/Airplane.ch8"}, {"Animal Race", "roms/Animal Race [Brian Astle].ch8"}, {"Astro Dodge", "roms/Astro Dodge [Revival Studios, 2008].ch8"}, {"BMP Viewer", "roms/BMP Viewer - Hello (C8 example) [Hap, 2005].ch8"}, {"Biorhythm ", "roms/Biorhythm [Jef Winsor].ch8"}, {"Blinky", "roms/Blinky [Hans Christian Egeberg, 1991].ch8"}, {"Blitz ", "roms/Blitz [David Winter].ch8"}, {"Bowling", "roms/Bowling [Gooitzen van der Wal].ch8"}, {"Breakout", "roms/Breakout (Brix hack) [David Winter, 1997].ch8"}, {"Brick", "roms/Brick (Brix hack, 1990).ch8"}, {"Brix", "roms/Brix [Andreas Gustafsson, 1990].ch8"}, {"Cave", "roms/Cave.ch8"}, {"Chip8 Picture", "roms/Chip8 Picture.ch8"}, {"Chip8 Logo", "roms/Chip8 emulator Logo [Garstyciuks].ch8"}, {"Clock Program", "roms/Clock Program [Bill Fisher, 1981].ch8"}, {"Coin Flipping", "roms/Coin Flipping [Carmelo Cortez, 1978].ch8"}, {"Connect 4", "roms/Connect 4 [David Winter].ch8"}, {"Craps", "roms/Craps [Camerlo Cortez, 1978].ch8"}, {"Deflection", "roms/Deflection [John Fort].ch8"}, {"Delay Timer Test", "roms/Delay Timer Test [Matthew Mikolay, 2010].ch8"}, {"Division Test", "roms/Division Test [Sergey Naydenov, 2010].ch8"}, {"Figures", "roms/Figures.ch8"}, {"Filter", "roms/Filter.ch8"}, {"Fishie", "roms/Fishie [Hap, 2005].ch8"}, {"Framed", "roms/Framed MK1 [GV Samways, 1980].ch8"}, {"Framed2", "roms/Framed MK2 [GV Samways, 1980].ch8"}, {"Guess", "roms/Guess [David Winter].ch8"}, {"Hi-Lo", "roms/Hi-Lo [Jef Winsor, 1978].ch8"}, {"Hidden", "roms/Hidden [David Winter, 1996].ch8"}, {"IBM Logo", "roms/IBM Logo.ch8"}, {"Jumping X", "roms/Jumping X and O [Harry Kleinberg, 1977].ch8"}, {"Kaleidoscope", "roms/Kaleidoscope [Joseph Weisbecker, 1978].ch8"}, {"Keypad Test", "roms/Keypad Test [Hap, 2006].ch8"}, {"Landing", "roms/Landing.ch8"}, {"Life", "roms/Life [GV Samways, 1980].ch8"}, {"Lunar Lander", "roms/Lunar Lander (Udo Pernisz, 1979).ch8"}, {"Mastermind FourRow", "roms/Mastermind FourRow (Robert Lindley, 1978).ch8"}, {"Maze", "roms/Maze [David Winter, 199x].ch8"}, {"Merlin", "roms/Merlin [David Winter].ch8"}, {"Minimal", "roms/Minimal game [Revival Studios, 2007].ch8"}, {"Missile", "roms/Missile [David Winter].ch8"}, {"Most", "roms/Most Dangerous Game [Peter Maruhnic].ch8"}, {"Nim ", "roms/Nim [Carmelo Cortez, 1978].ch8"}, {"Paddles", "roms/Paddles.ch8"}, {"Particle ", "roms/Particle Demo [zeroZshadow, 2008].ch8"}, {"Pong", "roms/Pong (1 player).ch8"}, {"Pong 2", "roms/Pong 2 (Pong hack) [David Winter, 1997].ch8"}, {"Pong 3", "roms/Pong [Paul Vervalin, 1990].ch8"}, {"Programmable Spacefighters", "roms/Programmable Spacefighters [Jef Winsor].ch8"}, {"Puzzle", "roms/Puzzle.ch8"}, {"Random Number", "roms/Random Number Test [Matthew Mikolay, 2010].ch8"}, {"Reversi", "roms/Reversi [Philip Baltzer].ch8"}, {"Rocket Launch", "roms/Rocket Launch [Jonas Lindstedt].ch8"}, {"Rocket Launcher", "roms/Rocket Launcher.ch8"}, {"Rocket ", "roms/Rocket [Joseph Weisbecker, 1978].ch8"}, {"Rush Hour", "roms/Rush Hour [Hap, 2006].ch8"}, {"Russian Roulette", "roms/Russian Roulette [Carmelo Cortez, 1978].ch8"}, {"SQRT Test", "roms/SQRT Test [Sergey Naydenov, 2010].ch8"}, {"Sequence Shoot", "roms/Sequence Shoot [Joyce Weisbecker].ch8"}, {"Shooting Stars", "roms/Shooting Stars [Philip Baltzer, 1978].ch8"}, {"Sierpinski", "roms/Sierpinski [Sergey Naydenov, 2010].ch8"}, {"Slide ", "roms/Slide [Joyce Weisbecker].ch8"}, {"Soccer", "roms/Soccer.ch8"}, {"Space Flight", "roms/Space Flight.ch8"}, {"Space Intercept", "roms/Space Intercept [Joseph Weisbecker, 1978].ch8"}, {"Space 
Invaders", "roms/Space Invaders [David Winter].ch8"}, {"Spooky Spot", "roms/Spooky Spot [Joseph Weisbecker, 1978].ch8"}, {"Squash", "roms/Squash [David Winter].ch8"}, {"Stars", "roms/Stars [Sergey Naydenov, 2010].ch8"}, {"Submarine", "roms/Submarine [Carmelo Cortez, 1978].ch8"}, {"Sum Fun", "roms/Sum Fun [Joyce Weisbecker].ch8"}, {"Syzygy", "roms/Syzygy [Roy Trevino, 1990].ch8"}, {"Tank", "roms/Tank.ch8"}, {"Tapeworm", "roms/Tapeworm [JDR, 1999].ch8"}, {"Tetris", "roms/Tetris [Fran Dachille, 1991].ch8"}, {"Tic-Tac-Toe", "roms/Tic-Tac-Toe [David Winter].ch8"}, {"Timebomb", "roms/Timebomb.ch8"}, {"Trip8 Demo", "roms/Trip8 Demo (2008) [Revival Studios].ch8"}, {"Tron", "roms/Tron.ch8"}, {"UFO", "roms/UFO [Lutz V, 1992].ch8"}, {"Vers", "roms/Vers [JMN, 1991].ch8"}, {"Vertical Brix", "roms/Vertical Brix [Paul Robson, 1996].ch8"}, {"Wall", "roms/Wall [David Winter].ch8"}, {"Wipe Off", "roms/Wipe Off [Joseph Weisbecker].ch8"}, {"Worm V4", "roms/Worm V4 [RB-Revival Studios, 2007].ch8"}, {"X-Mirror", "roms/X-Mirror.ch8"}, {"Zero Demo", "roms/Zero Demo [zeroZshadow, 2007].ch8"}, {"ZeroPong ", "roms/ZeroPong [zeroZshadow, 2007].ch8"}, } ui := new(UI) tt, err := opentype.Parse(PressStart2P_ttf) if err != nil { log.Fatal(err) } titleFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 24, DPI: 72, Hinting: font.HintingFull, }) btnFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 7, DPI: 72, Hinting: font.HintingFull, }) ui.font = &titleFont cb := func(btn *Button) { log.Printf("button %s was clicked!", btn.text) if ui.oncompleted != nil { ui.oncompleted(btn.rom) } } for n, rom := range ROMS { x := n % 8 y := n / 8 ui.btns = append(ui.btns, NewButton(rom.name, &btnFont, x, y, rom, cb)) } return ui } // Workaround to create a variable to receive both UI and Chip8 object. type Scene interface { Draw(screen *ebiten.Image) Update() } type Game struct { scene Scene } func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) { return 640, 320 } func (g *Game) Draw(screen *ebiten.Image) { g.scene.Draw(screen) ebitenutil.DebugPrint(screen, fmt.Sprintf("%f", ebiten.CurrentTPS())) } func (g *Game) Update() error { g.scene.Update() return nil } func main() { ebiten.SetMaxTPS(800) ebiten.SetWindowSize(640, 320) ebiten.SetWindowTitle("CHIP-8") cpu := NewCpu() mem := NewMemory() vme := NewVideoMemory() f, err := os.Open("audio.mp3") if err != nil { log.Fatal(err) } audio, err := audio.NewPlayer(audio.NewContext(32000), f) if err != nil { log.Fatal(err) } log.Printf("%+v", mem) kb := NewKeyboard() ui := NewUI() c8 := Chip8{cpu, mem, vme, audio, kb} game := Game{ui} ui.oncompleted = func(rom Rom) { game.scene = &c8 err := c8.mem.Load(rom.path) if err != nil { panic(err) } } if err := ebiten.RunGame(&game); err != nil { log.Fatal(err) } }
{ kb.queue = []uint16{} }
identifier_body
c8.go
package main import ( "fmt" "image/color" "log" "math/rand" "os" "strings" "time" "github.com/hajimehoshi/ebiten/v2" "github.com/hajimehoshi/ebiten/v2/audio" "github.com/hajimehoshi/ebiten/v2/ebitenutil" "github.com/hajimehoshi/ebiten/v2/inpututil" "github.com/hajimehoshi/ebiten/v2/text" "golang.org/x/image/font" "golang.org/x/image/font/opentype" ) const ( V_PIXELS = 32 H_PIXELS = 64 SCALE = 10 WIDTH = H_PIXELS * SCALE HEIGHT = V_PIXELS * SCALE BUTTON_WIDTH = 80 // Button width of Game Select UI BUTTON_HIGHT = 23 // Button height of Game Select UI SELECT_HIGHT = 45 // Title height of Game Select UI ) // A pixel in Chip8 console. type Pixel struct { x int y int enable bool } func (p *Pixel) image() *ebiten.Image { img := ebiten.NewImage(10, 10) if p.enable { img.Fill(color.White) } else { img.Fill(color.Black) } return img } func (p *Pixel) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(10*p.x), float64(10*p.y)) screen.DrawImage(p.image(), opts) } // Game main. type Chip8 struct { cpu *Cpu mem *Memory vme *VideoMemory audio *audio.Player kb *Keyboard } func (c8 *Chip8) Update() { c8.kb.Update() if len(c8.kb.queue) > 0 { keys := []string{} for _, key := range c8.kb.queue { keys = append(keys, fmt.Sprintf("%d", key)) } log.Printf("Unprocessed keys: %s", strings.Join(keys, " ")) } err := c8.cpu.Tick(c8.mem, c8.vme, c8.audio, c8.kb) if err != nil { log.Fatal(err) } } func (c8 *Chip8) Draw(screen *ebiten.Image) { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { xor := c8.vme.mem[x][y] ^ c8.vme.buf[x][y] if xor == 1 { pixel := Pixel{x, y, bytob(c8.vme.buf[x][y])} pixel.Draw(screen) } } } } func bytob(value byte) bool { if value == 1 { return true } else { return false } } type Keyboard struct { queue []uint16 } func (kb *Keyboard) Update() { // 0~9: 43~52 for _, key := range inpututil.PressedKeys() { if (key >= 43 && key <= 52) || (key >= 0 && key <= 5) { kb.queue = append(kb.queue, uint16(keytohex(key))) // log.Printf("keyPressed=%d \n", key) } } } func NewKeyboard() *Keyboard { kb := new(Keyboard) kb.queue = []uint16{} return kb } func (kb *Keyboard) Pop() *uint16 { len := len(kb.queue) if len > 0 { key := kb.queue[0] kb.queue = kb.queue[1:] return &key } else { return nil } } func (kb *Keyboard) Clear() { kb.queue = []uint16{} } func keytohex(key ebiten.Key) uint16 { if key >= 43 && key <= 52 { return uint16(key) - 43 } else { return uint16(key) + 0x10 } } type Cpu struct { v [64]uint8 i uint16 stack [16]uint16 sp uint16 pc uint16 dt uint16 st uint16 rnd *rand.Rand lastd time.Time lasts time.Time } func NewCpu() *Cpu { cpu := new(Cpu) cpu.pc = 0x200 cpu.rnd = rand.New(rand.NewSource(time.Now().UnixNano())) cpu.lastd = time.Now() cpu.lasts = time.Now() return cpu } func (cpu *Cpu) rand() uint8 { return uint8(cpu.rnd.Intn(256)) } func (cpu *Cpu) Tick(mem *Memory, vme *VideoMemory, audio *audio.Player, kb *Keyboard) error { o1 := mem.buf[cpu.pc] >> 4 o2 := mem.buf[cpu.pc] & 0x0F o3 := mem.buf[cpu.pc+1] >> 4 o4 := mem.buf[cpu.pc+1] & 0x0F opcode := fmt.Sprintf("%02X%02X%02X%02X", o1, o2, o3, o4) log.Printf("Tick sp=%d pc=%d dt=%d st=%d opcode=%s", cpu.sp, cpu.pc, cpu.dt, cpu.st, opcode) nnn := (uint16(o2) << 8) + (uint16(o3) << 4) + uint16(o4) kk := (uint8(o3) << 4) + uint8(o4) x := o2 y := o3 vx := uint16(cpu.v[o2]) vy := uint16(cpu.v[o3]) xy := vx + vy var cmd Command switch o1 { case 0x0: switch o2 { case 0x0: switch o3 { case 0xE: switch o4 { case 0x0: log.Println("CLS") vme.clear() cmd = Next{} case 0xE: log.Println("00EE RET") pc 
:= cpu.stack[cpu.sp-1] cpu.sp -= 1 cmd = Jump{pc + 2} } } default: log.Println("SYS addr") cmd = Jump{nnn} } case 0x1: log.Println("1nnn JP") cmd = Jump{nnn} case 0x2: log.Println("2nnn CALL") cpu.stack[cpu.sp] = cpu.pc cpu.sp += 1 cmd = Jump{nnn} case 0x3: log.Println("3xkk SE") if vx == uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x4: log.Println("4xkk SNE") if vx != uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x5: log.Println("5xy0 - SE") if vx == vy { cmd = Skip{} } else { cmd = Next{} } case 0x6: log.Println("6xkk - LD") cpu.v[x] = kk cmd = Next{} case 0x7: log.Println("7xkk - ADD") cpu.v[x] += kk cmd = Next{} case 0x8: switch o4 { case 0x0: log.Println("8xk0 - LD Vx, Vy") cpu.v[x] = cpu.v[y] case 0x1: log.Println("8xk1 - OR Vx, Vy") cpu.v[x] |= cpu.v[y] case 0x2: log.Println("8xk2 - AND Vx, Vy") cpu.v[x] &= cpu.v[y] case 0x3: log.Println("8xk3 - XOR Vx, Vy") cpu.v[x] ^= cpu.v[y] case 0x4: log.Println("8xk4 - ADD Vx, Vy") if xy > 0xFF { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(xy & 0xFF) case 0x5: log.Println("8xk5 - SUB Vx, Vy") if vx > vy { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vx - vy) case 0x6: log.Println("8xk6 - SHR Vx, Vy") cpu.v[0xF] = uint8(vx & 0x1) cpu.v[x] /= 2 case 0x7: log.Println("8xk7 - SUBN Vx, Vy") if vy > vx { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vy - vx) case 0xE: log.Println("8xkE - SHL Vx, Vy") cpu.v[0xF] = cpu.v[x] >> 7 cpu.v[x] *= 2 } cmd = Next{} case 0x9: log.Println("9xy0 - SNE") if vx != vy { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("Annn - LD I") cpu.i = nnn cmd = Next{} case 0xB: log.Println("Bnnn - JP") cmd = Jump{nnn + uint16(cpu.v[0])} case 0xC: log.Println("Cxkk - RND") cpu.v[x] = cpu.rand() & kk cmd = Next{} case 0xD: log.Println("DRW - Vx, Vy, nibble") n := o4 bytes := mem.buf[cpu.i : cpu.i+uint16(n)] cpu.v[0xF] = vme.draw(vx, vy, bytes) cmd = Next{} case 0xE: switch o3 { case 0x9: log.Println("Ex9E - SKP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if pressed { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("ExA1 - SKNP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if !pressed { cmd = Skip{} } else { cmd = Next{} } } case 0xF: switch o3 { case 0x0: switch o4 { case 0x7: log.Println("Fx07 - LD Vx, DT") cpu.v[x] = uint8(cpu.dt) cmd = Next{} case 0xA: log.Println("Fx0A - LD Vx, K") key := kb.Pop() if key != nil { cpu.v[x] = uint8(*key) cmd = Next{} } else { // Do nothing. 
} } case 0x1: switch o4 { case 0x5: log.Println("Fx15 - LD DT") cpu.dt = vx cpu.lastd = time.Now() cmd = Next{} case 0x8: log.Println("Fx18 - LD ST") cpu.st = vx cpu.lasts = time.Now() cmd = Next{} case 0xE: log.Println("Fx1E - ADD I Vx") cpu.i += vx cmd = Next{} } case 0x2: log.Println("Fx29 - LD F") cpu.i = vx * 5 cmd = Next{} case 0x3: log.Println("Fx33 - LD B") mem.buf[cpu.i] = (uint8(vx) / 100) % 10 mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10 mem.buf[cpu.i+2] = uint8(vx) % 10 cmd = Next{} case 0x5: log.Println("Fx55 - LD [I]") for n := 0; n <= int(x); n++ { mem.buf[cpu.i+uint16(n)] = cpu.v[n] } cmd = Next{} case 0x6: log.Println("Fx65 - LD") for n := 0; n <= int(x); n++ { cpu.v[n] = mem.buf[cpu.i+uint16(n)] } cmd = Next{} } } if cmd != nil { cmd.exec(cpu) } now := time.Now() elapsed := now.Sub(cpu.lastd) if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 { cpu.dt -= 1 cpu.lastd = now } elapsed = now.Sub(cpu.lasts) if elapsed.Seconds() > 1.0/60 && cpu.st > 0 { audio.Play() audio.Rewind() cpu.st -= 1 cpu.lasts = now } return nil } type Command interface { exec(cpu *Cpu) } type Next struct{} func (c Next) exec(cpu *Cpu) { cpu.pc += 2 } type Jump struct { addr uint16 } func (c Jump) exec(cpu *Cpu) { cpu.pc = c.addr } type Skip struct{} func (c Skip) exec(cpu *Cpu) { cpu.pc += 4 } type Memory struct { buf [0xFFF]byte // Chip-8 has 0xFFFF (4096) bytes of RAM. } func (m *Memory) Load(path string) error { f, err := os.Open(path) if err != nil { return err } n, err := f.Read(m.buf[0x200:]) log.Printf("%d bytes read from \"%s\".", n, path) return nil } func NewMemory() *Memory { m := new(Memory) // Load fontsets. m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x10, 0xF0, 0xF0, 0x90, 0xF0, 0x90, 0x90, 0xE0, 0x90, 0xE0, 0x90, 0xE0, 0xF0, 0x80, 0x80, 0x80, 0xF0, 0xE0, 0x90, 0x90, 0x90, 0xE0, 0xF0, 0x80, 0xF0, 0x80, 0xF0, 0xF0, 0x80, 0xF0, 0x80, 0x80} return m } // VideoMemory implements double buffer. type VideoMemory struct { buf [H_PIXELS][V_PIXELS]byte mem [H_PIXELS][V_PIXELS]byte } func NewVideoMemory() *VideoMemory { return new(VideoMemory) } func (vme *VideoMemory) clear() { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { vme.buf[x][y] = 0 } } } func (vme *VideoMemory) draw(x uint16, y uint16, buf []byte) uint8 { vf := uint16(0) for i, byte := range buf { i := uint16(i) vf += vme.draw_pixcel(x, y+i, (byte>>7)&0x1) vf += vme.draw_pixcel(x+1, y+i, (byte>>6)&0x1) vf += vme.draw_pixcel(x+2, y+i, (byte>>5)&0x1) vf += vme.draw_pixcel(x+3, y+i, (byte>>4)&0x1) vf += vme.draw_pixcel(x+4, y+i, (byte>>3)&0x1) vf += vme.draw_pixcel(x+5, y+i, (byte>>2)&0x1) vf += vme.draw_pixcel(x+6, y+i, (byte>>1)&0x1) vf += vme.draw_pixcel(x+7, y+i, (byte>>0)&0x1) } if vf > 0 { return 1 } else { return 0 } } func (vme *VideoMemory) draw_pixcel(x uint16, y uint16, new byte) uint16 { var vf uint16 // Check collision. 
if vme.buf[x][y] == 1 && new == 1 { vf = 1 } else { vf = 0 } vme.buf[x][y] ^= new return vf } type Button struct { text string img *ebiten.Image x int y int onclicked func(*Button) font *font.Face rom Rom } func NewButton(text string, font *font.Face, x, y int, rom Rom, onclicked func(*Button)) *Button { btn := new(Button) img := ebiten.NewImage(BUTTON_WIDTH-1, BUTTON_HIGHT-1) img.Fill(color.White) btn.img = img btn.text = text btn.font = font btn.x = x btn.y = y btn.rom = rom btn.onclicked = onclicked return btn } func (btn *Button) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(btn.x*BUTTON_WIDTH), float64(btn.y*BUTTON_HIGHT)+float64(SELECT_HIGHT)) screen.DrawImage(btn.img, opts) text.Draw(screen, btn.text, *btn.font, btn.x*BUTTON_WIDTH+10, btn.y*BUTTON_HIGHT+17+SELECT_HIGHT, color.Black) } type UI struct { btns []*Button oncompleted func(rom Rom) font *font.Face } func (ui *UI) Draw(screen *ebiten.Image) { text.Draw(screen, "SELECT A GAME", *ui.font, 160, 36, color.White) for _, btn := range ui.btns { btn.Draw(screen) } } func (ui *UI) Update() { clicked := ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) x, y := ebiten.CursorPosition() if clicked { log.Printf("Clicked: %v on (%d, %d)", clicked, x, y) for _, btn := range ui.btns { minx := btn.img.Bounds().Min.X + btn.x*BUTTON_WIDTH maxx := btn.img.Bounds().Max.X + btn.x*BUTTON_WIDTH miny := btn.img.Bounds().Min.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT maxy := btn.img.Bounds().Max.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT log.Printf("x=%d y=%d minx=%d maxx=%d miny=%d maxy=%d", x, y, minx, maxx, miny, maxy) if x >= minx && x <= maxx && y >= miny && y <= maxy { btn.onclicked(btn) } } } } type Rom struct { name string path string } func
() *UI { ROMS := [90]Rom{ {"15Puzzle", "roms/15 Puzzle [Roger Ivie].ch8"}, {"Addition Problems", "roms/Addition Problems [Paul C. Moews].ch8"}, {"Airplane", "roms/Airplane.ch8"}, {"Animal Race", "roms/Animal Race [Brian Astle].ch8"}, {"Astro Dodge", "roms/Astro Dodge [Revival Studios, 2008].ch8"}, {"BMP Viewer", "roms/BMP Viewer - Hello (C8 example) [Hap, 2005].ch8"}, {"Biorhythm ", "roms/Biorhythm [Jef Winsor].ch8"}, {"Blinky", "roms/Blinky [Hans Christian Egeberg, 1991].ch8"}, {"Blitz ", "roms/Blitz [David Winter].ch8"}, {"Bowling", "roms/Bowling [Gooitzen van der Wal].ch8"}, {"Breakout", "roms/Breakout (Brix hack) [David Winter, 1997].ch8"}, {"Brick", "roms/Brick (Brix hack, 1990).ch8"}, {"Brix", "roms/Brix [Andreas Gustafsson, 1990].ch8"}, {"Cave", "roms/Cave.ch8"}, {"Chip8 Picture", "roms/Chip8 Picture.ch8"}, {"Chip8 Logo", "roms/Chip8 emulator Logo [Garstyciuks].ch8"}, {"Clock Program", "roms/Clock Program [Bill Fisher, 1981].ch8"}, {"Coin Flipping", "roms/Coin Flipping [Carmelo Cortez, 1978].ch8"}, {"Connect 4", "roms/Connect 4 [David Winter].ch8"}, {"Craps", "roms/Craps [Camerlo Cortez, 1978].ch8"}, {"Deflection", "roms/Deflection [John Fort].ch8"}, {"Delay Timer Test", "roms/Delay Timer Test [Matthew Mikolay, 2010].ch8"}, {"Division Test", "roms/Division Test [Sergey Naydenov, 2010].ch8"}, {"Figures", "roms/Figures.ch8"}, {"Filter", "roms/Filter.ch8"}, {"Fishie", "roms/Fishie [Hap, 2005].ch8"}, {"Framed", "roms/Framed MK1 [GV Samways, 1980].ch8"}, {"Framed2", "roms/Framed MK2 [GV Samways, 1980].ch8"}, {"Guess", "roms/Guess [David Winter].ch8"}, {"Hi-Lo", "roms/Hi-Lo [Jef Winsor, 1978].ch8"}, {"Hidden", "roms/Hidden [David Winter, 1996].ch8"}, {"IBM Logo", "roms/IBM Logo.ch8"}, {"Jumping X", "roms/Jumping X and O [Harry Kleinberg, 1977].ch8"}, {"Kaleidoscope", "roms/Kaleidoscope [Joseph Weisbecker, 1978].ch8"}, {"Keypad Test", "roms/Keypad Test [Hap, 2006].ch8"}, {"Landing", "roms/Landing.ch8"}, {"Life", "roms/Life [GV Samways, 1980].ch8"}, {"Lunar Lander", "roms/Lunar Lander (Udo Pernisz, 1979).ch8"}, {"Mastermind FourRow", "roms/Mastermind FourRow (Robert Lindley, 1978).ch8"}, {"Maze", "roms/Maze [David Winter, 199x].ch8"}, {"Merlin", "roms/Merlin [David Winter].ch8"}, {"Minimal", "roms/Minimal game [Revival Studios, 2007].ch8"}, {"Missile", "roms/Missile [David Winter].ch8"}, {"Most", "roms/Most Dangerous Game [Peter Maruhnic].ch8"}, {"Nim ", "roms/Nim [Carmelo Cortez, 1978].ch8"}, {"Paddles", "roms/Paddles.ch8"}, {"Particle ", "roms/Particle Demo [zeroZshadow, 2008].ch8"}, {"Pong", "roms/Pong (1 player).ch8"}, {"Pong 2", "roms/Pong 2 (Pong hack) [David Winter, 1997].ch8"}, {"Pong 3", "roms/Pong [Paul Vervalin, 1990].ch8"}, {"Programmable Spacefighters", "roms/Programmable Spacefighters [Jef Winsor].ch8"}, {"Puzzle", "roms/Puzzle.ch8"}, {"Random Number", "roms/Random Number Test [Matthew Mikolay, 2010].ch8"}, {"Reversi", "roms/Reversi [Philip Baltzer].ch8"}, {"Rocket Launch", "roms/Rocket Launch [Jonas Lindstedt].ch8"}, {"Rocket Launcher", "roms/Rocket Launcher.ch8"}, {"Rocket ", "roms/Rocket [Joseph Weisbecker, 1978].ch8"}, {"Rush Hour", "roms/Rush Hour [Hap, 2006].ch8"}, {"Russian Roulette", "roms/Russian Roulette [Carmelo Cortez, 1978].ch8"}, {"SQRT Test", "roms/SQRT Test [Sergey Naydenov, 2010].ch8"}, {"Sequence Shoot", "roms/Sequence Shoot [Joyce Weisbecker].ch8"}, {"Shooting Stars", "roms/Shooting Stars [Philip Baltzer, 1978].ch8"}, {"Sierpinski", "roms/Sierpinski [Sergey Naydenov, 2010].ch8"}, {"Slide ", "roms/Slide [Joyce Weisbecker].ch8"}, {"Soccer", "roms/Soccer.ch8"}, 
{"Space Flight", "roms/Space Flight.ch8"}, {"Space Intercept", "roms/Space Intercept [Joseph Weisbecker, 1978].ch8"}, {"Space Invaders", "roms/Space Invaders [David Winter].ch8"}, {"Spooky Spot", "roms/Spooky Spot [Joseph Weisbecker, 1978].ch8"}, {"Squash", "roms/Squash [David Winter].ch8"}, {"Stars", "roms/Stars [Sergey Naydenov, 2010].ch8"}, {"Submarine", "roms/Submarine [Carmelo Cortez, 1978].ch8"}, {"Sum Fun", "roms/Sum Fun [Joyce Weisbecker].ch8"}, {"Syzygy", "roms/Syzygy [Roy Trevino, 1990].ch8"}, {"Tank", "roms/Tank.ch8"}, {"Tapeworm", "roms/Tapeworm [JDR, 1999].ch8"}, {"Tetris", "roms/Tetris [Fran Dachille, 1991].ch8"}, {"Tic-Tac-Toe", "roms/Tic-Tac-Toe [David Winter].ch8"}, {"Timebomb", "roms/Timebomb.ch8"}, {"Trip8 Demo", "roms/Trip8 Demo (2008) [Revival Studios].ch8"}, {"Tron", "roms/Tron.ch8"}, {"UFO", "roms/UFO [Lutz V, 1992].ch8"}, {"Vers", "roms/Vers [JMN, 1991].ch8"}, {"Vertical Brix", "roms/Vertical Brix [Paul Robson, 1996].ch8"}, {"Wall", "roms/Wall [David Winter].ch8"}, {"Wipe Off", "roms/Wipe Off [Joseph Weisbecker].ch8"}, {"Worm V4", "roms/Worm V4 [RB-Revival Studios, 2007].ch8"}, {"X-Mirror", "roms/X-Mirror.ch8"}, {"Zero Demo", "roms/Zero Demo [zeroZshadow, 2007].ch8"}, {"ZeroPong ", "roms/ZeroPong [zeroZshadow, 2007].ch8"}, } ui := new(UI) tt, err := opentype.Parse(PressStart2P_ttf) if err != nil { log.Fatal(err) } titleFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 24, DPI: 72, Hinting: font.HintingFull, }) btnFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 7, DPI: 72, Hinting: font.HintingFull, }) ui.font = &titleFont cb := func(btn *Button) { log.Printf("button %s was clicked!", btn.text) if ui.oncompleted != nil { ui.oncompleted(btn.rom) } } for n, rom := range ROMS { x := n % 8 y := n / 8 ui.btns = append(ui.btns, NewButton(rom.name, &btnFont, x, y, rom, cb)) } return ui } // Workaround to create a variable to receive both UI and Chip8 object. type Scene interface { Draw(screen *ebiten.Image) Update() } type Game struct { scene Scene } func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) { return 640, 320 } func (g *Game) Draw(screen *ebiten.Image) { g.scene.Draw(screen) ebitenutil.DebugPrint(screen, fmt.Sprintf("%f", ebiten.CurrentTPS())) } func (g *Game) Update() error { g.scene.Update() return nil } func main() { ebiten.SetMaxTPS(800) ebiten.SetWindowSize(640, 320) ebiten.SetWindowTitle("CHIP-8") cpu := NewCpu() mem := NewMemory() vme := NewVideoMemory() f, err := os.Open("audio.mp3") if err != nil { log.Fatal(err) } audio, err := audio.NewPlayer(audio.NewContext(32000), f) if err != nil { log.Fatal(err) } log.Printf("%+v", mem) kb := NewKeyboard() ui := NewUI() c8 := Chip8{cpu, mem, vme, audio, kb} game := Game{ui} ui.oncompleted = func(rom Rom) { game.scene = &c8 err := c8.mem.Load(rom.path) if err != nil { panic(err) } } if err := ebiten.RunGame(&game); err != nil { log.Fatal(err) } }
NewUI
identifier_name
c8.go
package main import ( "fmt" "image/color" "log" "math/rand" "os" "strings" "time" "github.com/hajimehoshi/ebiten/v2" "github.com/hajimehoshi/ebiten/v2/audio" "github.com/hajimehoshi/ebiten/v2/ebitenutil" "github.com/hajimehoshi/ebiten/v2/inpututil" "github.com/hajimehoshi/ebiten/v2/text" "golang.org/x/image/font" "golang.org/x/image/font/opentype" ) const ( V_PIXELS = 32 H_PIXELS = 64 SCALE = 10 WIDTH = H_PIXELS * SCALE HEIGHT = V_PIXELS * SCALE BUTTON_WIDTH = 80 // Button width of Game Select UI BUTTON_HIGHT = 23 // Button height of Game Select UI SELECT_HIGHT = 45 // Title height of Game Select UI ) // A pixel in Chip8 console. type Pixel struct { x int y int enable bool } func (p *Pixel) image() *ebiten.Image { img := ebiten.NewImage(10, 10) if p.enable { img.Fill(color.White) } else { img.Fill(color.Black) } return img } func (p *Pixel) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(10*p.x), float64(10*p.y)) screen.DrawImage(p.image(), opts) } // Game main. type Chip8 struct { cpu *Cpu mem *Memory vme *VideoMemory audio *audio.Player kb *Keyboard } func (c8 *Chip8) Update() { c8.kb.Update() if len(c8.kb.queue) > 0 { keys := []string{} for _, key := range c8.kb.queue { keys = append(keys, fmt.Sprintf("%d", key)) } log.Printf("Unprocessed keys: %s", strings.Join(keys, " ")) } err := c8.cpu.Tick(c8.mem, c8.vme, c8.audio, c8.kb) if err != nil { log.Fatal(err) } } func (c8 *Chip8) Draw(screen *ebiten.Image) { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { xor := c8.vme.mem[x][y] ^ c8.vme.buf[x][y] if xor == 1 { pixel := Pixel{x, y, bytob(c8.vme.buf[x][y])} pixel.Draw(screen) } } } } func bytob(value byte) bool { if value == 1 { return true } else { return false } } type Keyboard struct { queue []uint16 } func (kb *Keyboard) Update() { // 0~9: 43~52 for _, key := range inpututil.PressedKeys() { if (key >= 43 && key <= 52) || (key >= 0 && key <= 5) { kb.queue = append(kb.queue, uint16(keytohex(key))) // log.Printf("keyPressed=%d \n", key) } } } func NewKeyboard() *Keyboard { kb := new(Keyboard) kb.queue = []uint16{} return kb } func (kb *Keyboard) Pop() *uint16 { len := len(kb.queue) if len > 0 { key := kb.queue[0] kb.queue = kb.queue[1:] return &key } else { return nil } } func (kb *Keyboard) Clear() { kb.queue = []uint16{} } func keytohex(key ebiten.Key) uint16 { if key >= 43 && key <= 52 { return uint16(key) - 43 } else { return uint16(key) + 0x10 } } type Cpu struct { v [64]uint8 i uint16 stack [16]uint16 sp uint16 pc uint16 dt uint16 st uint16 rnd *rand.Rand lastd time.Time lasts time.Time } func NewCpu() *Cpu { cpu := new(Cpu) cpu.pc = 0x200 cpu.rnd = rand.New(rand.NewSource(time.Now().UnixNano())) cpu.lastd = time.Now() cpu.lasts = time.Now() return cpu } func (cpu *Cpu) rand() uint8 { return uint8(cpu.rnd.Intn(256)) } func (cpu *Cpu) Tick(mem *Memory, vme *VideoMemory, audio *audio.Player, kb *Keyboard) error { o1 := mem.buf[cpu.pc] >> 4 o2 := mem.buf[cpu.pc] & 0x0F o3 := mem.buf[cpu.pc+1] >> 4 o4 := mem.buf[cpu.pc+1] & 0x0F opcode := fmt.Sprintf("%02X%02X%02X%02X", o1, o2, o3, o4) log.Printf("Tick sp=%d pc=%d dt=%d st=%d opcode=%s", cpu.sp, cpu.pc, cpu.dt, cpu.st, opcode) nnn := (uint16(o2) << 8) + (uint16(o3) << 4) + uint16(o4) kk := (uint8(o3) << 4) + uint8(o4) x := o2 y := o3 vx := uint16(cpu.v[o2]) vy := uint16(cpu.v[o3]) xy := vx + vy var cmd Command switch o1 { case 0x0: switch o2 { case 0x0: switch o3 { case 0xE: switch o4 { case 0x0: log.Println("CLS") vme.clear() cmd = Next{} case 0xE: log.Println("00EE RET") pc 
:= cpu.stack[cpu.sp-1] cpu.sp -= 1 cmd = Jump{pc + 2} } } default: log.Println("SYS addr") cmd = Jump{nnn} } case 0x1: log.Println("1nnn JP") cmd = Jump{nnn} case 0x2: log.Println("2nnn CALL") cpu.stack[cpu.sp] = cpu.pc cpu.sp += 1 cmd = Jump{nnn} case 0x3: log.Println("3xkk SE") if vx == uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x4: log.Println("4xkk SNE") if vx != uint16(kk) { cmd = Skip{} } else { cmd = Next{} } case 0x5: log.Println("5xy0 - SE") if vx == vy { cmd = Skip{} } else { cmd = Next{} } case 0x6: log.Println("6xkk - LD") cpu.v[x] = kk cmd = Next{} case 0x7: log.Println("7xkk - ADD") cpu.v[x] += kk cmd = Next{} case 0x8: switch o4 { case 0x0: log.Println("8xk0 - LD Vx, Vy") cpu.v[x] = cpu.v[y] case 0x1: log.Println("8xk1 - OR Vx, Vy") cpu.v[x] |= cpu.v[y] case 0x2: log.Println("8xk2 - AND Vx, Vy") cpu.v[x] &= cpu.v[y] case 0x3: log.Println("8xk3 - XOR Vx, Vy") cpu.v[x] ^= cpu.v[y] case 0x4: log.Println("8xk4 - ADD Vx, Vy") if xy > 0xFF { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(xy & 0xFF) case 0x5: log.Println("8xk5 - SUB Vx, Vy") if vx > vy { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vx - vy) case 0x6: log.Println("8xk6 - SHR Vx, Vy") cpu.v[0xF] = uint8(vx & 0x1) cpu.v[x] /= 2 case 0x7: log.Println("8xk7 - SUBN Vx, Vy") if vy > vx { cpu.v[0xF] = 1 } else { cpu.v[0xF] = 0 } cpu.v[x] = uint8(vy - vx) case 0xE: log.Println("8xkE - SHL Vx, Vy") cpu.v[0xF] = cpu.v[x] >> 7 cpu.v[x] *= 2 } cmd = Next{} case 0x9: log.Println("9xy0 - SNE") if vx != vy { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("Annn - LD I") cpu.i = nnn cmd = Next{} case 0xB: log.Println("Bnnn - JP") cmd = Jump{nnn + uint16(cpu.v[0])} case 0xC: log.Println("Cxkk - RND") cpu.v[x] = cpu.rand() & kk cmd = Next{} case 0xD: log.Println("DRW - Vx, Vy, nibble") n := o4 bytes := mem.buf[cpu.i : cpu.i+uint16(n)] cpu.v[0xF] = vme.draw(vx, vy, bytes) cmd = Next{} case 0xE: switch o3 { case 0x9: log.Println("Ex9E - SKP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if pressed { cmd = Skip{} } else { cmd = Next{} } case 0xA: log.Println("ExA1 - SKNP") pressed := false for true { key := kb.Pop() if key == nil { break } if vx == *key { pressed = true } } if !pressed { cmd = Skip{} } else { cmd = Next{} } } case 0xF: switch o3 { case 0x0: switch o4 { case 0x7: log.Println("Fx07 - LD Vx, DT") cpu.v[x] = uint8(cpu.dt) cmd = Next{} case 0xA: log.Println("Fx0A - LD Vx, K") key := kb.Pop() if key != nil { cpu.v[x] = uint8(*key) cmd = Next{} } else { // Do nothing. 
} } case 0x1: switch o4 { case 0x5: log.Println("Fx15 - LD DT") cpu.dt = vx cpu.lastd = time.Now() cmd = Next{} case 0x8: log.Println("Fx18 - LD ST") cpu.st = vx cpu.lasts = time.Now() cmd = Next{} case 0xE: log.Println("Fx1E - ADD I Vx") cpu.i += vx cmd = Next{} } case 0x2: log.Println("Fx29 - LD F") cpu.i = vx * 5 cmd = Next{} case 0x3: log.Println("Fx33 - LD B") mem.buf[cpu.i] = (uint8(vx) / 100) % 10 mem.buf[cpu.i+1] = (uint8(vx) / 10) % 10 mem.buf[cpu.i+2] = uint8(vx) % 10 cmd = Next{} case 0x5: log.Println("Fx55 - LD [I]") for n := 0; n <= int(x); n++ { mem.buf[cpu.i+uint16(n)] = cpu.v[n] } cmd = Next{} case 0x6: log.Println("Fx65 - LD") for n := 0; n <= int(x); n++ { cpu.v[n] = mem.buf[cpu.i+uint16(n)] } cmd = Next{} } } if cmd != nil { cmd.exec(cpu) } now := time.Now() elapsed := now.Sub(cpu.lastd) if elapsed.Seconds() > 1.0/60 && cpu.dt > 0 { cpu.dt -= 1 cpu.lastd = now } elapsed = now.Sub(cpu.lasts) if elapsed.Seconds() > 1.0/60 && cpu.st > 0 { audio.Play() audio.Rewind() cpu.st -= 1 cpu.lasts = now } return nil } type Command interface { exec(cpu *Cpu) } type Next struct{} func (c Next) exec(cpu *Cpu) { cpu.pc += 2 } type Jump struct { addr uint16 } func (c Jump) exec(cpu *Cpu) { cpu.pc = c.addr } type Skip struct{} func (c Skip) exec(cpu *Cpu) { cpu.pc += 4 } type Memory struct { buf [0xFFF]byte // Chip-8 has 0xFFFF (4096) bytes of RAM. } func (m *Memory) Load(path string) error { f, err := os.Open(path) if err != nil { return err } n, err := f.Read(m.buf[0x200:]) log.Printf("%d bytes read from \"%s\".", n, path) return nil } func NewMemory() *Memory { m := new(Memory) // Load fontsets. m.buf = [0xFFF]byte{0xF0, 0x90, 0x90, 0x90, 0xF0, 0x20, 0x60, 0x20, 0x20, 0x70, 0xF0, 0x10, 0xF0, 0x80, 0xF0, 0xF0, 0x10, 0xF0, 0x10, 0xF0, 0x90, 0x90, 0xF0, 0x10, 0x10, 0xF0, 0x80, 0xF0, 0x10, 0xF0, 0xF0, 0x80, 0xF0, 0x90, 0xF0, 0xF0, 0x10, 0x20, 0x40, 0x40, 0xF0, 0x90, 0xF0, 0x90, 0xF0, 0xF0, 0x90, 0xF0, 0x10, 0xF0, 0xF0, 0x90, 0xF0, 0x90, 0x90, 0xE0, 0x90, 0xE0, 0x90, 0xE0, 0xF0, 0x80, 0x80, 0x80, 0xF0, 0xE0, 0x90, 0x90, 0x90, 0xE0, 0xF0, 0x80, 0xF0, 0x80, 0xF0, 0xF0, 0x80, 0xF0, 0x80, 0x80} return m } // VideoMemory implements double buffer. type VideoMemory struct { buf [H_PIXELS][V_PIXELS]byte mem [H_PIXELS][V_PIXELS]byte } func NewVideoMemory() *VideoMemory { return new(VideoMemory) } func (vme *VideoMemory) clear() { for x := 0; x < H_PIXELS; x++ { for y := 0; y < V_PIXELS; y++ { vme.buf[x][y] = 0 } } } func (vme *VideoMemory) draw(x uint16, y uint16, buf []byte) uint8 { vf := uint16(0) for i, byte := range buf { i := uint16(i) vf += vme.draw_pixcel(x, y+i, (byte>>7)&0x1) vf += vme.draw_pixcel(x+1, y+i, (byte>>6)&0x1) vf += vme.draw_pixcel(x+2, y+i, (byte>>5)&0x1) vf += vme.draw_pixcel(x+3, y+i, (byte>>4)&0x1) vf += vme.draw_pixcel(x+4, y+i, (byte>>3)&0x1) vf += vme.draw_pixcel(x+5, y+i, (byte>>2)&0x1) vf += vme.draw_pixcel(x+6, y+i, (byte>>1)&0x1) vf += vme.draw_pixcel(x+7, y+i, (byte>>0)&0x1) } if vf > 0 { return 1 } else { return 0 } } func (vme *VideoMemory) draw_pixcel(x uint16, y uint16, new byte) uint16 { var vf uint16
if vme.buf[x][y] == 1 && new == 1 { vf = 1 } else { vf = 0 } vme.buf[x][y] ^= new return vf } type Button struct { text string img *ebiten.Image x int y int onclicked func(*Button) font *font.Face rom Rom } func NewButton(text string, font *font.Face, x, y int, rom Rom, onclicked func(*Button)) *Button { btn := new(Button) img := ebiten.NewImage(BUTTON_WIDTH-1, BUTTON_HIGHT-1) img.Fill(color.White) btn.img = img btn.text = text btn.font = font btn.x = x btn.y = y btn.rom = rom btn.onclicked = onclicked return btn } func (btn *Button) Draw(screen *ebiten.Image) { opts := &ebiten.DrawImageOptions{} opts.GeoM.Translate(float64(btn.x*BUTTON_WIDTH), float64(btn.y*BUTTON_HIGHT)+float64(SELECT_HIGHT)) screen.DrawImage(btn.img, opts) text.Draw(screen, btn.text, *btn.font, btn.x*BUTTON_WIDTH+10, btn.y*BUTTON_HIGHT+17+SELECT_HIGHT, color.Black) } type UI struct { btns []*Button oncompleted func(rom Rom) font *font.Face } func (ui *UI) Draw(screen *ebiten.Image) { text.Draw(screen, "SELECT A GAME", *ui.font, 160, 36, color.White) for _, btn := range ui.btns { btn.Draw(screen) } } func (ui *UI) Update() { clicked := ebiten.IsMouseButtonPressed(ebiten.MouseButtonLeft) x, y := ebiten.CursorPosition() if clicked { log.Printf("Clicked: %v on (%d, %d)", clicked, x, y) for _, btn := range ui.btns { minx := btn.img.Bounds().Min.X + btn.x*BUTTON_WIDTH maxx := btn.img.Bounds().Max.X + btn.x*BUTTON_WIDTH miny := btn.img.Bounds().Min.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT maxy := btn.img.Bounds().Max.Y + btn.y*BUTTON_HIGHT + SELECT_HIGHT log.Printf("x=%d y=%d minx=%d maxx=%d miny=%d maxy=%d", x, y, minx, maxx, miny, maxy) if x >= minx && x <= maxx && y >= miny && y <= maxy { btn.onclicked(btn) } } } } type Rom struct { name string path string } func NewUI() *UI { ROMS := [90]Rom{ {"15Puzzle", "roms/15 Puzzle [Roger Ivie].ch8"}, {"Addition Problems", "roms/Addition Problems [Paul C. 
Moews].ch8"}, {"Airplane", "roms/Airplane.ch8"}, {"Animal Race", "roms/Animal Race [Brian Astle].ch8"}, {"Astro Dodge", "roms/Astro Dodge [Revival Studios, 2008].ch8"}, {"BMP Viewer", "roms/BMP Viewer - Hello (C8 example) [Hap, 2005].ch8"}, {"Biorhythm ", "roms/Biorhythm [Jef Winsor].ch8"}, {"Blinky", "roms/Blinky [Hans Christian Egeberg, 1991].ch8"}, {"Blitz ", "roms/Blitz [David Winter].ch8"}, {"Bowling", "roms/Bowling [Gooitzen van der Wal].ch8"}, {"Breakout", "roms/Breakout (Brix hack) [David Winter, 1997].ch8"}, {"Brick", "roms/Brick (Brix hack, 1990).ch8"}, {"Brix", "roms/Brix [Andreas Gustafsson, 1990].ch8"}, {"Cave", "roms/Cave.ch8"}, {"Chip8 Picture", "roms/Chip8 Picture.ch8"}, {"Chip8 Logo", "roms/Chip8 emulator Logo [Garstyciuks].ch8"}, {"Clock Program", "roms/Clock Program [Bill Fisher, 1981].ch8"}, {"Coin Flipping", "roms/Coin Flipping [Carmelo Cortez, 1978].ch8"}, {"Connect 4", "roms/Connect 4 [David Winter].ch8"}, {"Craps", "roms/Craps [Camerlo Cortez, 1978].ch8"}, {"Deflection", "roms/Deflection [John Fort].ch8"}, {"Delay Timer Test", "roms/Delay Timer Test [Matthew Mikolay, 2010].ch8"}, {"Division Test", "roms/Division Test [Sergey Naydenov, 2010].ch8"}, {"Figures", "roms/Figures.ch8"}, {"Filter", "roms/Filter.ch8"}, {"Fishie", "roms/Fishie [Hap, 2005].ch8"}, {"Framed", "roms/Framed MK1 [GV Samways, 1980].ch8"}, {"Framed2", "roms/Framed MK2 [GV Samways, 1980].ch8"}, {"Guess", "roms/Guess [David Winter].ch8"}, {"Hi-Lo", "roms/Hi-Lo [Jef Winsor, 1978].ch8"}, {"Hidden", "roms/Hidden [David Winter, 1996].ch8"}, {"IBM Logo", "roms/IBM Logo.ch8"}, {"Jumping X", "roms/Jumping X and O [Harry Kleinberg, 1977].ch8"}, {"Kaleidoscope", "roms/Kaleidoscope [Joseph Weisbecker, 1978].ch8"}, {"Keypad Test", "roms/Keypad Test [Hap, 2006].ch8"}, {"Landing", "roms/Landing.ch8"}, {"Life", "roms/Life [GV Samways, 1980].ch8"}, {"Lunar Lander", "roms/Lunar Lander (Udo Pernisz, 1979).ch8"}, {"Mastermind FourRow", "roms/Mastermind FourRow (Robert Lindley, 1978).ch8"}, {"Maze", "roms/Maze [David Winter, 199x].ch8"}, {"Merlin", "roms/Merlin [David Winter].ch8"}, {"Minimal", "roms/Minimal game [Revival Studios, 2007].ch8"}, {"Missile", "roms/Missile [David Winter].ch8"}, {"Most", "roms/Most Dangerous Game [Peter Maruhnic].ch8"}, {"Nim ", "roms/Nim [Carmelo Cortez, 1978].ch8"}, {"Paddles", "roms/Paddles.ch8"}, {"Particle ", "roms/Particle Demo [zeroZshadow, 2008].ch8"}, {"Pong", "roms/Pong (1 player).ch8"}, {"Pong 2", "roms/Pong 2 (Pong hack) [David Winter, 1997].ch8"}, {"Pong 3", "roms/Pong [Paul Vervalin, 1990].ch8"}, {"Programmable Spacefighters", "roms/Programmable Spacefighters [Jef Winsor].ch8"}, {"Puzzle", "roms/Puzzle.ch8"}, {"Random Number", "roms/Random Number Test [Matthew Mikolay, 2010].ch8"}, {"Reversi", "roms/Reversi [Philip Baltzer].ch8"}, {"Rocket Launch", "roms/Rocket Launch [Jonas Lindstedt].ch8"}, {"Rocket Launcher", "roms/Rocket Launcher.ch8"}, {"Rocket ", "roms/Rocket [Joseph Weisbecker, 1978].ch8"}, {"Rush Hour", "roms/Rush Hour [Hap, 2006].ch8"}, {"Russian Roulette", "roms/Russian Roulette [Carmelo Cortez, 1978].ch8"}, {"SQRT Test", "roms/SQRT Test [Sergey Naydenov, 2010].ch8"}, {"Sequence Shoot", "roms/Sequence Shoot [Joyce Weisbecker].ch8"}, {"Shooting Stars", "roms/Shooting Stars [Philip Baltzer, 1978].ch8"}, {"Sierpinski", "roms/Sierpinski [Sergey Naydenov, 2010].ch8"}, {"Slide ", "roms/Slide [Joyce Weisbecker].ch8"}, {"Soccer", "roms/Soccer.ch8"}, {"Space Flight", "roms/Space Flight.ch8"}, {"Space Intercept", "roms/Space Intercept [Joseph Weisbecker, 1978].ch8"}, {"Space 
Invaders", "roms/Space Invaders [David Winter].ch8"}, {"Spooky Spot", "roms/Spooky Spot [Joseph Weisbecker, 1978].ch8"}, {"Squash", "roms/Squash [David Winter].ch8"}, {"Stars", "roms/Stars [Sergey Naydenov, 2010].ch8"}, {"Submarine", "roms/Submarine [Carmelo Cortez, 1978].ch8"}, {"Sum Fun", "roms/Sum Fun [Joyce Weisbecker].ch8"}, {"Syzygy", "roms/Syzygy [Roy Trevino, 1990].ch8"}, {"Tank", "roms/Tank.ch8"}, {"Tapeworm", "roms/Tapeworm [JDR, 1999].ch8"}, {"Tetris", "roms/Tetris [Fran Dachille, 1991].ch8"}, {"Tic-Tac-Toe", "roms/Tic-Tac-Toe [David Winter].ch8"}, {"Timebomb", "roms/Timebomb.ch8"}, {"Trip8 Demo", "roms/Trip8 Demo (2008) [Revival Studios].ch8"}, {"Tron", "roms/Tron.ch8"}, {"UFO", "roms/UFO [Lutz V, 1992].ch8"}, {"Vers", "roms/Vers [JMN, 1991].ch8"}, {"Vertical Brix", "roms/Vertical Brix [Paul Robson, 1996].ch8"}, {"Wall", "roms/Wall [David Winter].ch8"}, {"Wipe Off", "roms/Wipe Off [Joseph Weisbecker].ch8"}, {"Worm V4", "roms/Worm V4 [RB-Revival Studios, 2007].ch8"}, {"X-Mirror", "roms/X-Mirror.ch8"}, {"Zero Demo", "roms/Zero Demo [zeroZshadow, 2007].ch8"}, {"ZeroPong ", "roms/ZeroPong [zeroZshadow, 2007].ch8"}, } ui := new(UI) tt, err := opentype.Parse(PressStart2P_ttf) if err != nil { log.Fatal(err) } titleFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 24, DPI: 72, Hinting: font.HintingFull, }) btnFont, err := opentype.NewFace(tt, &opentype.FaceOptions{ Size: 7, DPI: 72, Hinting: font.HintingFull, }) ui.font = &titleFont cb := func(btn *Button) { log.Printf("button %s was clicked!", btn.text) if ui.oncompleted != nil { ui.oncompleted(btn.rom) } } for n, rom := range ROMS { x := n % 8 y := n / 8 ui.btns = append(ui.btns, NewButton(rom.name, &btnFont, x, y, rom, cb)) } return ui } // Workaround to create a variable to receive both UI and Chip8 object. type Scene interface { Draw(screen *ebiten.Image) Update() } type Game struct { scene Scene } func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) { return 640, 320 } func (g *Game) Draw(screen *ebiten.Image) { g.scene.Draw(screen) ebitenutil.DebugPrint(screen, fmt.Sprintf("%f", ebiten.CurrentTPS())) } func (g *Game) Update() error { g.scene.Update() return nil } func main() { ebiten.SetMaxTPS(800) ebiten.SetWindowSize(640, 320) ebiten.SetWindowTitle("CHIP-8") cpu := NewCpu() mem := NewMemory() vme := NewVideoMemory() f, err := os.Open("audio.mp3") if err != nil { log.Fatal(err) } audio, err := audio.NewPlayer(audio.NewContext(32000), f) if err != nil { log.Fatal(err) } log.Printf("%+v", mem) kb := NewKeyboard() ui := NewUI() c8 := Chip8{cpu, mem, vme, audio, kb} game := Game{ui} ui.oncompleted = func(rom Rom) { game.scene = &c8 err := c8.mem.Load(rom.path) if err != nil { panic(err) } } if err := ebiten.RunGame(&game); err != nil { log.Fatal(err) } }
// Check collision.
random_line_split
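
The CHIP-8 emulator code repeated in the rows above XORs sprite bits into the frame buffer, and the masked-out line of the last row (`// Check collision.`) marks where the VF flag is decided: it becomes 1 whenever a pixel that was already lit is erased by the draw. As a standalone illustration of just that rule — not the emulator's own types; names like framebuffer and drawPixel are hypothetical — a minimal Go sketch:

```go
package main

import "fmt"

const (
	hPixels = 64 // CHIP-8 screen width in pixels
	vPixels = 32 // CHIP-8 screen height in pixels
)

// framebuffer holds one bit per pixel, like the buf/mem arrays in the rows above.
type framebuffer [hPixels][vPixels]byte

// drawPixel XORs one sprite bit onto the buffer and reports a collision:
// the VF flag must become 1 whenever a lit pixel is turned off by the draw.
func (fb *framebuffer) drawPixel(x, y uint16, bit byte) byte {
	// Wrap coordinates (a common CHIP-8 convention; the code above indexes directly).
	x %= hPixels
	y %= vPixels
	collided := byte(0)
	if fb[x][y] == 1 && bit == 1 {
		collided = 1
	}
	fb[x][y] ^= bit
	return collided
}

func main() {
	var fb framebuffer
	fb[3][4] = 1
	fmt.Println(fb.drawPixel(3, 4, 1)) // 1: a lit pixel was erased, so VF is set
	fmt.Println(fb.drawPixel(3, 4, 1)) // 0: drawing onto a dark pixel is not a collision
}
```

Run as-is it prints 1 then 0: the first draw erases the lit pixel (a collision), the second lights an empty one.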
fmlrc2.rs
extern crate clap; extern crate env_logger; extern crate exitcode; extern crate log; extern crate needletail; use clap::{Arg, App, value_t, values_t}; use log::{info, error}; use needletail::parse_fastx_file; use std::fs::File; use std::sync::{Arc, mpsc}; use threadpool::ThreadPool; use fmlrc::bv_bwt::BitVectorBWT; use fmlrc::ordered_fasta_writer::OrderedFastaWriter; use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job}; const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); fn
() { //initialize logging for our benefit later env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init(); //non-cli parameters const JOB_SLOTS: u64 = 10000; const UPDATE_INTERVAL: u64 = 10000; //this is the CLI block, params that get populated appear before let bwt_fn: String; let long_read_fn: String; let corrected_read_fn: String; let mut kmer_sizes: Vec<usize> = vec![21, 59]; let mut threads: usize = 1; let mut begin_id: u64 = 0; let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF; let mut min_count: u64 = 5; let mut min_frac: f64 = 0.1; let mut branch_factor: f64 = 4.0; let mut cache_size: usize = 8; let verbose_mode: bool; let matches = App::new("FMLRC2") .version(VERSION.unwrap_or("?")) .author("J. Matthew Holt <[email protected]>") .about("FM-index Long Read Corrector - Rust implementation") .arg(Arg::with_name("verbose_mode") .short("v") .long("verbose") .help("enable verbose output")) .arg(Arg::with_name("kmer_sizes") .short("k") .long("K") .multiple(true) .takes_value(true) .help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")")) .arg(Arg::with_name("threads") .short("t") .long("threads") .takes_value(true) .help("number of correction threads (default: 1)")) .arg(Arg::with_name("begin_id") .short("b") .long("begin_index") .takes_value(true) .help("index of read to start with (default: 0)")) .arg(Arg::with_name("end_id") .short("e") .long("end_index") .takes_value(true) .help("index of read to end with (default: end of file)")) .arg(Arg::with_name("min_count") .short("m") .long("min_count") .takes_value(true) .help("absolute minimum k-mer count to consisder a path (default: 5)")) .arg(Arg::with_name("min_frac") .short("f") .long("min_dynamic_count") .takes_value(true) .help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)")) .arg(Arg::with_name("branch_factor") .short("B") .long("branch_factor") .takes_value(true) .help("branching factor for correction, scaled by k (default: 4.0)")) .arg(Arg::with_name("cache_size") .short("C") .long("cache_size") .takes_value(true) .help("the length of k-mer to precompute in cache (default: 8)")) .arg(Arg::with_name("COMP_MSBWT.NPY") .help("The compressed BWT file with high accuracy reads") .required(true) .index(1)) .arg(Arg::with_name("LONG_READS.FA") .help("The FASTX file with uncorrected reads") .required(true) .index(2)) .arg(Arg::with_name("CORRECTED_READS.FA") .help("The FASTA file to write corrected reads to") .required(true) .index(3)) .get_matches(); //pull out required values bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string(); long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string(); corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string(); //now check options verbose_mode = matches.is_present("verbose_mode"); kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes); threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads); begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id); end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id); min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count); min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac); branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor); cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size); info!("Input parameters (required):"); info!("\tBWT: \"{}\"", bwt_fn); 
match File::open(&bwt_fn) { Ok(_) => {}, Err(e) => { error!("Failed to open BWT file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; info!("\tInput reads: \"{}\"", long_read_fn); match File::open(&long_read_fn) { Ok(_) => {}, Err(e) => { error!("Failed to open input reads file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; info!("\tOutput corrected reads: \"{}\"", corrected_read_fn); let write_file: File = match File::create(&corrected_read_fn) { Ok(file) => file, Err(e) => { error!("Failed to create output corrected reads file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; let mut fasta_writer = OrderedFastaWriter::new(&write_file); info!("Execution Parameters:"); info!("\tverbose: {}", verbose_mode); info!("\tthreads: {}", threads); info!("\tcache size: {}", cache_size); info!("Correction Parameters:"); info!("\treads to correct: [{}, {})", begin_id, end_id); if begin_id > end_id { error!("--begin_index set to value larger than --end_index"); std::process::exit(exitcode::DATAERR); } kmer_sizes.sort_unstable(); info!("\tk-mer sizes: {:?}", kmer_sizes); info!("\tabs. mininimum count: {}", min_count); info!("\tdyn. minimimum fraction: {}", min_frac); if !(0.0..=1.0).contains(&min_frac) { error!("--min_dynamic_count must be within the range [0, 1]"); std::process::exit(exitcode::DATAERR); } info!("\tbranching factor: {}", branch_factor); if branch_factor <= 0.0 { error!("--branch_factor must be greater than 0.0"); std::process::exit(exitcode::DATAERR); } //TODO make some of these hard-coded into params? let my_params: CorrectionParameters = CorrectionParameters { kmer_sizes, min_count, max_branch_attempt_length: 10000, branch_limit_factor: branch_factor, branch_buffer_factor: 1.3, //TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug* midpoint_ed_factor: 0.4, tail_buffer_factor: 1.05, frac: min_frac, verbose: verbose_mode }; let arc_params: Arc<CorrectionParameters> = Arc::new(my_params); //first load the BWT into memory let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size); match bwt.load_numpy_file(&bwt_fn) { Ok(_) => {}, Err(e) => { error!("Failed to load BWT file: {:?}", e); std::process::exit(exitcode::IOERR); } }; let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt); //we need to set up the multiprocessing components now let pool = ThreadPool::new(threads); let (tx, rx) = mpsc::channel(); //now needletail open the reads to correct let mut read_index: u64 = 0; let mut jobs_queued: u64 = 0; let mut results_received: u64 = 0; info!("Starting read correction processes..."); match parse_fastx_file(&long_read_fn) { Ok(mut fastx_reader) => { while let Some(raw_record) = fastx_reader.next() { let record = match raw_record { Ok(record) => { record }, Err(e) => { error!("Invalid record while parsing long read file: {:?}", e); std::process::exit(exitcode::IOERR); } }; if read_index >= begin_id && read_index < end_id { //if we've filled our queue, then we should wait until we get some results back if jobs_queued - results_received >= JOB_SLOTS { let rx_value: CorrectionResults = rx.recv().unwrap(); if verbose_mode { info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after); } match fasta_writer.write_correction(rx_value) { Ok(()) => {}, Err(e) => { error!("Failed while writing read correction: {:?}", e); std::process::exit(exitcode::IOERR); } }; results_received += 1; if results_received % UPDATE_INTERVAL == 0 { info!("Processed {} reads...", 
results_received); } } //clone the transmit channel and submit the pool job let tx = tx.clone(); let arc_bwt = arc_bwt.clone(); let arc_params = arc_params.clone(); let read_data: LongReadFA = LongReadFA { read_index: jobs_queued, label: String::from_utf8(record.id().to_vec()).unwrap(), seq: String::from_utf8(record.seq().to_vec()).unwrap() }; //println!("Submitting {:?}", jobs_queued); pool.execute(move|| { let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params); tx.send(correction_results).expect("channel will be there waiting for the pool"); }); jobs_queued += 1; } read_index += 1; } }, Err(e) => { error!("Failed to open long read file: {:?}", e); std::process::exit(exitcode::IOERR); } } while results_received < jobs_queued { let rx_value: CorrectionResults = rx.recv().unwrap(); if verbose_mode { info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after); } match fasta_writer.write_correction(rx_value) { Ok(()) => {}, Err(e) => { error!("Failed while writing read correction: {:?}", e); std::process::exit(exitcode::IOERR); } }; results_received += 1; if results_received % UPDATE_INTERVAL == 0 { info!("Processed {} reads...", results_received); } } info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id); }
main
identifier_name
fmlrc2.rs
extern crate clap; extern crate env_logger; extern crate exitcode; extern crate log; extern crate needletail; use clap::{Arg, App, value_t, values_t}; use log::{info, error}; use needletail::parse_fastx_file; use std::fs::File; use std::sync::{Arc, mpsc}; use threadpool::ThreadPool; use fmlrc::bv_bwt::BitVectorBWT; use fmlrc::ordered_fasta_writer::OrderedFastaWriter; use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job}; const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); fn main() { //initialize logging for our benefit later env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init(); //non-cli parameters const JOB_SLOTS: u64 = 10000; const UPDATE_INTERVAL: u64 = 10000; //this is the CLI block, params that get populated appear before let bwt_fn: String; let long_read_fn: String; let corrected_read_fn: String; let mut kmer_sizes: Vec<usize> = vec![21, 59]; let mut threads: usize = 1; let mut begin_id: u64 = 0; let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF; let mut min_count: u64 = 5; let mut min_frac: f64 = 0.1; let mut branch_factor: f64 = 4.0; let mut cache_size: usize = 8; let verbose_mode: bool; let matches = App::new("FMLRC2") .version(VERSION.unwrap_or("?")) .author("J. Matthew Holt <[email protected]>") .about("FM-index Long Read Corrector - Rust implementation") .arg(Arg::with_name("verbose_mode") .short("v") .long("verbose") .help("enable verbose output")) .arg(Arg::with_name("kmer_sizes") .short("k") .long("K") .multiple(true) .takes_value(true) .help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")")) .arg(Arg::with_name("threads") .short("t") .long("threads") .takes_value(true) .help("number of correction threads (default: 1)")) .arg(Arg::with_name("begin_id") .short("b") .long("begin_index") .takes_value(true) .help("index of read to start with (default: 0)")) .arg(Arg::with_name("end_id") .short("e") .long("end_index") .takes_value(true) .help("index of read to end with (default: end of file)")) .arg(Arg::with_name("min_count") .short("m") .long("min_count") .takes_value(true) .help("absolute minimum k-mer count to consisder a path (default: 5)")) .arg(Arg::with_name("min_frac") .short("f") .long("min_dynamic_count") .takes_value(true) .help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)")) .arg(Arg::with_name("branch_factor") .short("B") .long("branch_factor") .takes_value(true) .help("branching factor for correction, scaled by k (default: 4.0)")) .arg(Arg::with_name("cache_size") .short("C") .long("cache_size") .takes_value(true) .help("the length of k-mer to precompute in cache (default: 8)")) .arg(Arg::with_name("COMP_MSBWT.NPY") .help("The compressed BWT file with high accuracy reads") .required(true) .index(1)) .arg(Arg::with_name("LONG_READS.FA") .help("The FASTX file with uncorrected reads") .required(true)
.help("The FASTA file to write corrected reads to") .required(true) .index(3)) .get_matches(); //pull out required values bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string(); long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string(); corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string(); //now check options verbose_mode = matches.is_present("verbose_mode"); kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes); threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads); begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id); end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id); min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count); min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac); branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor); cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size); info!("Input parameters (required):"); info!("\tBWT: \"{}\"", bwt_fn); match File::open(&bwt_fn) { Ok(_) => {}, Err(e) => { error!("Failed to open BWT file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; info!("\tInput reads: \"{}\"", long_read_fn); match File::open(&long_read_fn) { Ok(_) => {}, Err(e) => { error!("Failed to open input reads file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; info!("\tOutput corrected reads: \"{}\"", corrected_read_fn); let write_file: File = match File::create(&corrected_read_fn) { Ok(file) => file, Err(e) => { error!("Failed to create output corrected reads file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; let mut fasta_writer = OrderedFastaWriter::new(&write_file); info!("Execution Parameters:"); info!("\tverbose: {}", verbose_mode); info!("\tthreads: {}", threads); info!("\tcache size: {}", cache_size); info!("Correction Parameters:"); info!("\treads to correct: [{}, {})", begin_id, end_id); if begin_id > end_id { error!("--begin_index set to value larger than --end_index"); std::process::exit(exitcode::DATAERR); } kmer_sizes.sort_unstable(); info!("\tk-mer sizes: {:?}", kmer_sizes); info!("\tabs. mininimum count: {}", min_count); info!("\tdyn. minimimum fraction: {}", min_frac); if !(0.0..=1.0).contains(&min_frac) { error!("--min_dynamic_count must be within the range [0, 1]"); std::process::exit(exitcode::DATAERR); } info!("\tbranching factor: {}", branch_factor); if branch_factor <= 0.0 { error!("--branch_factor must be greater than 0.0"); std::process::exit(exitcode::DATAERR); } //TODO make some of these hard-coded into params? let my_params: CorrectionParameters = CorrectionParameters { kmer_sizes, min_count, max_branch_attempt_length: 10000, branch_limit_factor: branch_factor, branch_buffer_factor: 1.3, //TODO: make this 0.4 a CLI param? 
we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug* midpoint_ed_factor: 0.4, tail_buffer_factor: 1.05, frac: min_frac, verbose: verbose_mode }; let arc_params: Arc<CorrectionParameters> = Arc::new(my_params); //first load the BWT into memory let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size); match bwt.load_numpy_file(&bwt_fn) { Ok(_) => {}, Err(e) => { error!("Failed to load BWT file: {:?}", e); std::process::exit(exitcode::IOERR); } }; let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt); //we need to set up the multiprocessing components now let pool = ThreadPool::new(threads); let (tx, rx) = mpsc::channel(); //now needletail open the reads to correct let mut read_index: u64 = 0; let mut jobs_queued: u64 = 0; let mut results_received: u64 = 0; info!("Starting read correction processes..."); match parse_fastx_file(&long_read_fn) { Ok(mut fastx_reader) => { while let Some(raw_record) = fastx_reader.next() { let record = match raw_record { Ok(record) => { record }, Err(e) => { error!("Invalid record while parsing long read file: {:?}", e); std::process::exit(exitcode::IOERR); } }; if read_index >= begin_id && read_index < end_id { //if we've filled our queue, then we should wait until we get some results back if jobs_queued - results_received >= JOB_SLOTS { let rx_value: CorrectionResults = rx.recv().unwrap(); if verbose_mode { info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after); } match fasta_writer.write_correction(rx_value) { Ok(()) => {}, Err(e) => { error!("Failed while writing read correction: {:?}", e); std::process::exit(exitcode::IOERR); } }; results_received += 1; if results_received % UPDATE_INTERVAL == 0 { info!("Processed {} reads...", results_received); } } //clone the transmit channel and submit the pool job let tx = tx.clone(); let arc_bwt = arc_bwt.clone(); let arc_params = arc_params.clone(); let read_data: LongReadFA = LongReadFA { read_index: jobs_queued, label: String::from_utf8(record.id().to_vec()).unwrap(), seq: String::from_utf8(record.seq().to_vec()).unwrap() }; //println!("Submitting {:?}", jobs_queued); pool.execute(move|| { let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params); tx.send(correction_results).expect("channel will be there waiting for the pool"); }); jobs_queued += 1; } read_index += 1; } }, Err(e) => { error!("Failed to open long read file: {:?}", e); std::process::exit(exitcode::IOERR); } } while results_received < jobs_queued { let rx_value: CorrectionResults = rx.recv().unwrap(); if verbose_mode { info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after); } match fasta_writer.write_correction(rx_value) { Ok(()) => {}, Err(e) => { error!("Failed while writing read correction: {:?}", e); std::process::exit(exitcode::IOERR); } }; results_received += 1; if results_received % UPDATE_INTERVAL == 0 { info!("Processed {} reads...", results_received); } } info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id); }
.index(2)) .arg(Arg::with_name("CORRECTED_READS.FA")
fmlrc2.rs
extern crate clap; extern crate env_logger; extern crate exitcode; extern crate log; extern crate needletail; use clap::{Arg, App, value_t, values_t}; use log::{info, error}; use needletail::parse_fastx_file; use std::fs::File; use std::sync::{Arc, mpsc}; use threadpool::ThreadPool; use fmlrc::bv_bwt::BitVectorBWT; use fmlrc::ordered_fasta_writer::OrderedFastaWriter; use fmlrc::read_correction::{CorrectionParameters, CorrectionResults, LongReadFA, correction_job}; const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); fn main()
{ //initialize logging for our benefit later env_logger::from_env(env_logger::Env::default().default_filter_or("info")).init(); //non-cli parameters const JOB_SLOTS: u64 = 10000; const UPDATE_INTERVAL: u64 = 10000; //this is the CLI block, params that get populated appear before let bwt_fn: String; let long_read_fn: String; let corrected_read_fn: String; let mut kmer_sizes: Vec<usize> = vec![21, 59]; let mut threads: usize = 1; let mut begin_id: u64 = 0; let mut end_id: u64 = 0xFFFFFFFFFFFFFFFF; let mut min_count: u64 = 5; let mut min_frac: f64 = 0.1; let mut branch_factor: f64 = 4.0; let mut cache_size: usize = 8; let verbose_mode: bool; let matches = App::new("FMLRC2") .version(VERSION.unwrap_or("?")) .author("J. Matthew Holt <[email protected]>") .about("FM-index Long Read Corrector - Rust implementation") .arg(Arg::with_name("verbose_mode") .short("v") .long("verbose") .help("enable verbose output")) .arg(Arg::with_name("kmer_sizes") .short("k") .long("K") .multiple(true) .takes_value(true) .help("k-mer sizes for correction, can be specified multiple times (default: \"-k 21 59\")")) .arg(Arg::with_name("threads") .short("t") .long("threads") .takes_value(true) .help("number of correction threads (default: 1)")) .arg(Arg::with_name("begin_id") .short("b") .long("begin_index") .takes_value(true) .help("index of read to start with (default: 0)")) .arg(Arg::with_name("end_id") .short("e") .long("end_index") .takes_value(true) .help("index of read to end with (default: end of file)")) .arg(Arg::with_name("min_count") .short("m") .long("min_count") .takes_value(true) .help("absolute minimum k-mer count to consisder a path (default: 5)")) .arg(Arg::with_name("min_frac") .short("f") .long("min_dynamic_count") .takes_value(true) .help("dynamic minimum k-mer count fraction of median to consider a path (default: 0.1)")) .arg(Arg::with_name("branch_factor") .short("B") .long("branch_factor") .takes_value(true) .help("branching factor for correction, scaled by k (default: 4.0)")) .arg(Arg::with_name("cache_size") .short("C") .long("cache_size") .takes_value(true) .help("the length of k-mer to precompute in cache (default: 8)")) .arg(Arg::with_name("COMP_MSBWT.NPY") .help("The compressed BWT file with high accuracy reads") .required(true) .index(1)) .arg(Arg::with_name("LONG_READS.FA") .help("The FASTX file with uncorrected reads") .required(true) .index(2)) .arg(Arg::with_name("CORRECTED_READS.FA") .help("The FASTA file to write corrected reads to") .required(true) .index(3)) .get_matches(); //pull out required values bwt_fn = matches.value_of("COMP_MSBWT.NPY").unwrap().to_string(); long_read_fn = matches.value_of("LONG_READS.FA").unwrap().to_string(); corrected_read_fn = matches.value_of("CORRECTED_READS.FA").unwrap().to_string(); //now check options verbose_mode = matches.is_present("verbose_mode"); kmer_sizes = values_t!(matches.values_of("kmer_sizes"), usize).unwrap_or(kmer_sizes); threads = value_t!(matches.value_of("threads"), usize).unwrap_or(threads); begin_id = value_t!(matches.value_of("begin_id"), u64).unwrap_or(begin_id); end_id = value_t!(matches.value_of("end_id"), u64).unwrap_or(end_id); min_count = value_t!(matches.value_of("min_count"), u64).unwrap_or(min_count); min_frac = value_t!(matches.value_of("min_frac"), f64).unwrap_or(min_frac); branch_factor = value_t!(matches.value_of("branch_factor"), f64).unwrap_or(branch_factor); cache_size = value_t!(matches.value_of("cache_size"), usize).unwrap_or(cache_size); info!("Input parameters (required):"); info!("\tBWT: \"{}\"", bwt_fn); 
match File::open(&bwt_fn) { Ok(_) => {}, Err(e) => { error!("Failed to open BWT file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; info!("\tInput reads: \"{}\"", long_read_fn); match File::open(&long_read_fn) { Ok(_) => {}, Err(e) => { error!("Failed to open input reads file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; info!("\tOutput corrected reads: \"{}\"", corrected_read_fn); let write_file: File = match File::create(&corrected_read_fn) { Ok(file) => file, Err(e) => { error!("Failed to create output corrected reads file: {:?}", e); std::process::exit(exitcode::NOINPUT); } }; let mut fasta_writer = OrderedFastaWriter::new(&write_file); info!("Execution Parameters:"); info!("\tverbose: {}", verbose_mode); info!("\tthreads: {}", threads); info!("\tcache size: {}", cache_size); info!("Correction Parameters:"); info!("\treads to correct: [{}, {})", begin_id, end_id); if begin_id > end_id { error!("--begin_index set to value larger than --end_index"); std::process::exit(exitcode::DATAERR); } kmer_sizes.sort_unstable(); info!("\tk-mer sizes: {:?}", kmer_sizes); info!("\tabs. mininimum count: {}", min_count); info!("\tdyn. minimimum fraction: {}", min_frac); if !(0.0..=1.0).contains(&min_frac) { error!("--min_dynamic_count must be within the range [0, 1]"); std::process::exit(exitcode::DATAERR); } info!("\tbranching factor: {}", branch_factor); if branch_factor <= 0.0 { error!("--branch_factor must be greater than 0.0"); std::process::exit(exitcode::DATAERR); } //TODO make some of these hard-coded into params? let my_params: CorrectionParameters = CorrectionParameters { kmer_sizes, min_count, max_branch_attempt_length: 10000, branch_limit_factor: branch_factor, branch_buffer_factor: 1.3, //TODO: make this 0.4 a CLI param? we did a prelim test with 0.3 and prec/recall went up but total indels/subs went up *shrug* midpoint_ed_factor: 0.4, tail_buffer_factor: 1.05, frac: min_frac, verbose: verbose_mode }; let arc_params: Arc<CorrectionParameters> = Arc::new(my_params); //first load the BWT into memory let mut bwt: BitVectorBWT = BitVectorBWT::with_cache_size(cache_size); match bwt.load_numpy_file(&bwt_fn) { Ok(_) => {}, Err(e) => { error!("Failed to load BWT file: {:?}", e); std::process::exit(exitcode::IOERR); } }; let arc_bwt: Arc<BitVectorBWT> = Arc::new(bwt); //we need to set up the multiprocessing components now let pool = ThreadPool::new(threads); let (tx, rx) = mpsc::channel(); //now needletail open the reads to correct let mut read_index: u64 = 0; let mut jobs_queued: u64 = 0; let mut results_received: u64 = 0; info!("Starting read correction processes..."); match parse_fastx_file(&long_read_fn) { Ok(mut fastx_reader) => { while let Some(raw_record) = fastx_reader.next() { let record = match raw_record { Ok(record) => { record }, Err(e) => { error!("Invalid record while parsing long read file: {:?}", e); std::process::exit(exitcode::IOERR); } }; if read_index >= begin_id && read_index < end_id { //if we've filled our queue, then we should wait until we get some results back if jobs_queued - results_received >= JOB_SLOTS { let rx_value: CorrectionResults = rx.recv().unwrap(); if verbose_mode { info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after); } match fasta_writer.write_correction(rx_value) { Ok(()) => {}, Err(e) => { error!("Failed while writing read correction: {:?}", e); std::process::exit(exitcode::IOERR); } }; results_received += 1; if results_received % UPDATE_INTERVAL == 0 { info!("Processed {} reads...", 
results_received); } } //clone the transmit channel and submit the pool job let tx = tx.clone(); let arc_bwt = arc_bwt.clone(); let arc_params = arc_params.clone(); let read_data: LongReadFA = LongReadFA { read_index: jobs_queued, label: String::from_utf8(record.id().to_vec()).unwrap(), seq: String::from_utf8(record.seq().to_vec()).unwrap() }; //println!("Submitting {:?}", jobs_queued); pool.execute(move|| { let correction_results: CorrectionResults = correction_job(arc_bwt, read_data, arc_params); tx.send(correction_results).expect("channel will be there waiting for the pool"); }); jobs_queued += 1; } read_index += 1; } }, Err(e) => { error!("Failed to open long read file: {:?}", e); std::process::exit(exitcode::IOERR); } } while results_received < jobs_queued { let rx_value: CorrectionResults = rx.recv().unwrap(); if verbose_mode { info!("Job #{:?}: {:.2} -> {:.2}", rx_value.read_index, rx_value.avg_before, rx_value.avg_after); } match fasta_writer.write_correction(rx_value) { Ok(()) => {}, Err(e) => { error!("Failed while writing read correction: {:?}", e); std::process::exit(exitcode::IOERR); } }; results_received += 1; if results_received % UPDATE_INTERVAL == 0 { info!("Processed {} reads...", results_received); } } info!("Finished processing {} total reads in range [{}, {})", results_received, begin_id, end_id); }
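The main loop above applies backpressure with a bounded job queue: correction jobs go to a thread pool, and once `jobs_queued - results_received` reaches `JOB_SLOTS` one result is drained from the mpsc channel before the next read is queued. Below is a minimal, hypothetical sketch of that pattern only — illustrative values and a stand-in payload instead of `correction_job`, assuming just the `threadpool` crate already used above:

```rust
use std::sync::mpsc;
use threadpool::ThreadPool;

fn main() {
    const JOB_SLOTS: u64 = 4; // small cap for the sketch; fmlrc2 uses 10000
    let pool = ThreadPool::new(2);
    let (tx, rx) = mpsc::channel();

    let mut jobs_queued: u64 = 0;
    let mut results_received: u64 = 0;

    for read_index in 0..20u64 {
        // If the in-flight window is full, block on one result before submitting more.
        if jobs_queued - results_received >= JOB_SLOTS {
            let finished: u64 = rx.recv().expect("a worker still holds a sender");
            results_received += 1;
            println!("drained job {}", finished);
        }
        let tx = tx.clone();
        pool.execute(move || {
            // Stand-in for correction_job(arc_bwt, read_data, arc_params).
            tx.send(read_index).expect("receiver lives in main");
        });
        jobs_queued += 1;
    }

    // Drain whatever is still outstanding, mirroring the final `while` loop above.
    while results_received < jobs_queued {
        let finished: u64 = rx.recv().unwrap();
        results_received += 1;
        println!("drained job {}", finished);
    }
}
```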
flask_main.py
import flask from flask import render_template from flask import request from flask import url_for import uuid import json import logging # Date handling import arrow # Replacement for datetime, based on moment.js # import datetime # But we still need time from dateutil import tz # For interpreting local times # OAuth2 - Google library implementation for convenience from oauth2client import client import httplib2 # used in oauth2 flow # Google API for services from apiclient import discovery ### # Globals ### import config if __name__ == "__main__": CONFIG = config.configuration() else: CONFIG = config.configuration(proxied=True) app = flask.Flask(__name__) app.debug=CONFIG.DEBUG app.logger.setLevel(logging.DEBUG) app.secret_key=CONFIG.SECRET_KEY SCOPES = 'https://www.googleapis.com/auth/calendar.readonly' CLIENT_SECRET_FILE = CONFIG.GOOGLE_KEY_FILE ## You'll need this APPLICATION_NAME = 'MeetMe class project' ############################# # # Pages (routed from URLs) # ############################# @app.route("/") @app.route("/index") def index(): app.logger.debug("Entering index") if 'begin_date' not in flask.session: init_session_values() return render_template('index.html') @app.route("/choose") def choose(): ## We'll need authorization to list calendars ## I wanted to put what follows into a function, but had ## to pull it back here because the redirect has to be a ## 'return' app.logger.debug("Checking credentials for Google calendar access") credentials = valid_credentials() if not credentials: app.logger.debug("Redirecting to authorization") return flask.redirect(flask.url_for('oauth2callback')) gcal_service = get_gcal_service(credentials) app.logger.debug("Returned from get_gcal_service") flask.g.calendars = list_calendars(gcal_service) return render_template('index.html') #### # # Google calendar authorization: # Returns us to the main /choose screen after inserting # the calendar_service object in the session state. May # redirect to OAuth server first, and may take multiple # trips through the oauth2 callback function. # # Protocol for use ON EACH REQUEST: # First, check for valid credentials # If we don't have valid credentials # Get credentials (jump to the oauth2 protocol) # (redirects back to /choose, this time with credentials) # If we do have valid credentials # Get the service object # # The final result of successful authorization is a 'service' # object. We use a 'service' object to actually retrieve data # from the Google services. Service objects are NOT serializable --- # we can't stash one in a cookie. Instead, on each request we # get a fresh serivce object from our credentials, which are # serializable. # # Note that after authorization we always redirect to /choose; # If this is unsatisfactory, we'll need a session variable to use # as a 'continuation' or 'return address' to use instead. # #### def valid_credentials(): """ Returns OAuth2 credentials if we have valid credentials in the session. This is a 'truthy' value. Return None if we don't have credentials, or if they have expired or are otherwise invalid. This is a 'falsy' value. """ if 'credentials' not in flask.session: return None credentials = client.OAuth2Credentials.from_json( flask.session['credentials']) if (credentials.invalid or credentials.access_token_expired): return None return credentials def get_gcal_service(credentials): """ We need a Google calendar 'service' object to obtain list of calendars, busy times, etc. This requires authorization. 
If authorization is already in effect, we'll just return with the authorization. Otherwise, control flow will be interrupted by authorization, and we'll end up redirected back to /choose *without a service object*. Then the second call will succeed without additional authorization. """ app.logger.debug("Entering get_gcal_service") http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth) app.logger.debug("Returning service") return service @app.route('/oauth2callback') def oauth2callback(): """ The 'flow' has this one place to call back to. We'll enter here more than once as steps in the flow are completed, and need to keep track of how far we've gotten. The first time we'll do the first step, the second time we'll skip the first step and do the second, and so on. """ app.logger.debug("Entering oauth2callback") flow = client.flow_from_clientsecrets( CLIENT_SECRET_FILE, scope= SCOPES, redirect_uri=flask.url_for('oauth2callback', _external=True)) ## Note we are *not* redirecting above. We are noting *where* ## we will redirect to, which is this function. ## The *second* time we enter here, it's a callback ## with 'code' set in the URL parameter. If we don't ## see that, it must be the first time through, so we ## need to do step 1. app.logger.debug("Got flow") if 'code' not in flask.request.args: app.logger.debug("Code not in flask.request.args") auth_uri = flow.step1_get_authorize_url() return flask.redirect(auth_uri) ## This will redirect back here, but the second time through ## we'll have the 'code' parameter set else: ## It's the second time through ... we can tell because ## we got the 'code' argument in the URL. app.logger.debug("Code was in flask.request.args") auth_code = flask.request.args.get('code') credentials = flow.step2_exchange(auth_code) flask.session['credentials'] = credentials.to_json() ## Now I can build the service and execute the query, ## but for the moment I'll just log it and go back to ## the main screen app.logger.debug("Got credentials") return flask.redirect(flask.url_for('choose')) ##### # # Option setting: Buttons or forms that add some # information into session state. Don't do the # computation here; use of the information might # depend on what other information we have. # Setting an option sends us back to the main display # page, where we may put the new information to use. # ##### @app.route('/setrange', methods=['POST']) def setrange(): """ User chose a date range with the bootstrap daterange widget. """ app.logger.debug("Entering setrange") flask.flash("Setrange gave us '{}'".format( request.form.get('daterange'))) daterange = request.form.get('daterange') flask.session['daterange'] = daterange daterange_parts = daterange.split() flask.session['begin_date'] = interpret_date(daterange_parts[0]) flask.session['end_date'] = interpret_date(daterange_parts[2]) app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format( daterange_parts[0], daterange_parts[1], flask.session['begin_date'], flask.session['end_date'])) return flask.redirect(flask.url_for("choose")) #### # # Initialize session variables # #### def init_session_values(): """ Start with some reasonable defaults for date and time ranges. Note this must be run in app context ... can't call from main. 
""" # Default date span = tomorrow to 1 week from now now = arrow.now('local') # We really should be using tz from browser tomorrow = now.replace(days=+1) nextweek = now.replace(days=+7) flask.session["begin_date"] = tomorrow.floor('day').isoformat() flask.session["end_date"] = nextweek.ceil('day').isoformat() flask.session["daterange"] = "{} - {}".format( tomorrow.format("MM/DD/YYYY"), nextweek.format("MM/DD/YYYY")) # Default time span each day, 8 to 5 flask.session["begin_time"] = interpret_time("9am") flask.session["end_time"] = interpret_time("5pm") def interpret_time( text ): """ Read time in a human-compatible format and interpret as ISO format with local timezone. May throw exception if time can't be interpreted. In that case it will also flash a message explaining accepted formats. """ app.logger.debug("Decoding time '{}'".format(text)) time_formats = ["ha", "h:mma", "h:mm a", "H:mm"] try: as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal()) as_arrow = as_arrow.replace(year=2016) #HACK see below app.logger.debug("Succeeded interpreting time") except: app.logger.debug("Failed to interpret time") flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm" .format(text)) raise return as_arrow.isoformat() #HACK #Workaround # isoformat() on raspberry Pi does not work for some dates # far from now. It will fail with an overflow from time stamp out # of range while checking for daylight savings time. Workaround is # to force the date-time combination into the year 2016, which seems to # get the timestamp into a reasonable range. This workaround should be # removed when Arrow or Dateutil.tz is fixed. # FIXME: Remove the workaround when arrow is fixed (but only after testing # on raspberry Pi --- failure is likely due to 32-bit integers on that platform) def interpret_date( text ): """ Convert text of date to ISO format used internally, with the local time zone. """ try: as_arrow = arrow.get(text, "MM/DD/YYYY").replace( tzinfo=tz.tzlocal()) except: flask.flash("Date '{}' didn't fit expected format 12/31/2001") raise return as_arrow.isoformat() def next_day(isotext): """ ISO date + 1 day (used in query to Google calendar) """ as_arrow = arrow.get(isotext) return as_arrow.replace(days=+1).isoformat() #### # # Functions (NOT pages) that return some information # #### def list_calendars(service): """ Given a google 'service' object, return a list of calendars. Each calendar is represented by a dict. The returned list is sorted to have the primary calendar first, and selected (that is, displayed in Google Calendars web app) calendars before unselected calendars. """ app.logger.debug("Entering list_calendars") calendar_list = service.calendarList().list().execute()["items"] result = [ ] for cal in calendar_list: kind = cal["kind"] id = cal["id"] if "description" in cal: desc = cal["description"] else: desc = "(no description)" summary = cal["summary"] # Optional binary attributes with False as default selected = ("selected" in cal) and cal["selected"] primary = ("primary" in cal) and cal["primary"] result.append( { "kind": kind, "id": id, "summary": summary, "selected": selected, "primary": primary }) return sorted(result, key=cal_sort_key) def cal_sort_key( cal ): """ Sort key for the list of calendars: primary calendar first, then other selected calendars, then unselected calendars. 
(" " sorts before "X", and tuples are compared piecewise) """ if cal["selected"]: selected_key = " " else: selected_key = "X" if cal["primary"]: primary_key = " " else: primary_key = "X" return (primary_key, selected_key, cal["summary"]) ################# # # Functions used within the templates # ################# @app.template_filter( 'fmtdate' ) def format_arrow_date( date ): try: normal = arrow.get( date ) return normal.format("ddd MM/DD/YYYY") except: return "(bad date)" @app.template_filter( 'fmttime' ) def format_arrow_time( time ): try: normal = arrow.get( time ) return normal.format("HH:mm") except: return "(bad time)" ############# if __name__ == "__main__": # App is created above so that it will # exist whether this is 'main' or not # (e.g., if we are running under green unicorn) app.run(port=CONFIG.PORT,host="0.0.0.0")
root.go
package cmd import ( "cronitor/lib" "errors" "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "os" "os/exec" "regexp" "runtime" "strconv" "strings" "sync" "time" "github.com/fatih/color" "github.com/getsentry/raven-go" "github.com/spf13/cobra" "github.com/spf13/viper" ) var Version string = "25.2" var cfgFile string var userAgent string // Flags that are either global or used in multiple commands var apiKey string var debugLog string var dev bool var hostname string var pingApiKey string var verbose bool var noStdoutPassthru bool // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "cronitor", Short: shortDescription(Version), Long: shortDescription(Version) + ` Command line tools for Cronitor.io. See https://cronitor.io/docs/using-cronitor-cli for details.`, } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := RootCmd.Execute(); err != nil { fatal(err.Error(), 1) } } var varApiKey = "CRONITOR_API_KEY" var varHostname = "CRONITOR_HOSTNAME" var varLog = "CRONITOR_LOG" var varPingApiKey = "CRONITOR_PING_API_KEY" var varExcludeText = "CRONITOR_EXCLUDE_TEXT" var varConfig = "CRONITOR_CONFIG" func init() { userAgent = fmt.Sprintf("CronitorCLI/%s", Version) cobra.OnInitialize(initConfig) // Here you will define your flags and configuration settings. // Cobra supports persistent flags, which, if defined here, // will be global for your application. RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file") RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key") RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key") RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)") RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file") RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output") RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode") RootCmd.PersistentFlags().MarkHidden("use-dev") viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key")) viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname")) viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log")) viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key")) viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config")) } // initConfig reads in config file and ENV variables if set. func initConfig() { viper.AutomaticEnv() // read in environment variables that match // If a custom config file is specified by flag or env var, use it. Otherwise use default file. if len(viper.GetString(varConfig)) > 0 { viper.SetConfigFile(viper.GetString(varConfig)) } else { viper.AddConfigPath(defaultConfigFileDirectory()) viper.SetConfigName("cronitor") } // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { log("Reading config from " + viper.ConfigFileUsed()) } } func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) { defer group.Done() Client := &http.Client{ Timeout: time.Second * 10, } hostname := effectiveHostname() pingApiAuthKey := viper.GetString(varPingApiKey) pingApiHost := "" formattedStamp := "" formattedDuration := "" formattedStatusCode := "" if timestamp > 0 { formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp)) } if len(message) > 0 { message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000))) } if len(pingApiAuthKey) > 0 { pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50)) } if len(hostname) > 0 { hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50))) } // By passing duration up, we save the computation on the server side if duration != nil { formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration)) } // We aren't using exit code at time of writing, but we have the field available for healthcheck monitors. if exitCode != nil { formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode) } // The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running. if len(series) > 0 { series = fmt.Sprintf("&series=%s", series) } pingSent := false uri := "" for i := 1; i <= 6; i++ { if dev { pingApiHost = "http://dev.cronitor.io" } else if i > 2 && pingApiHost == "https://cronitor.link" { pingApiHost = "https://cronitor.io" } else { pingApiHost = "https://cronitor.link" } // After 2 failed attempts, take a brief random break before trying again if i > 2 { time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32())) } uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode) log("Sending ping " + uri) request, _ := http.NewRequest("GET", uri, nil) request.Header.Add("User-Agent", userAgent) response, err := Client.Do(request) if err != nil { log(err.Error()) continue } _, err = ioutil.ReadAll(response.Body) response.Body.Close() // Any 2xx is considered a successful response if response.StatusCode >= 200 && response.StatusCode < 300 { pingSent = true break } // Backoff on any 4xx request, e.g. 
429 Too Many Requests if response.StatusCode >= 400 && response.StatusCode < 500 { pingSent = true break } } if !pingSent { raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil) } } func effectiveHostname() string { if len(viper.GetString(varHostname)) > 0 { return viper.GetString(varHostname) } hostname, _ := os.Hostname() return hostname } func effectiveTimezoneLocationName() lib.TimezoneLocationName { // First, check if a TZ or CRON_TZ environemnt variable is set -- Diff var used by diff distros if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } // Attempt to parse timedatectl (should work on FreeBSD, many linux distros) if output, err := exec.Command("timedatectl").Output(); err == nil { outputString := strings.Replace(string(output), "Time zone", "Timezone", -1) r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`) if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 { return lib.TimezoneLocationName{ret[1]} } } // If /etc/localtime is a symlink, check what it is linking to if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink { if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 { if strings.Contains(symlink, "UTC") { return lib.TimezoneLocationName{"UTC"} } symlinkParts := strings.Split(symlink, "/") return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")} } } // If we happen to have an /etc/timezone, no guarantee it's used, but read that if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil { return lib.TimezoneLocationName{string(locale)} } return lib.TimezoneLocationName{""} } func
() string { if runtime.GOOS == "windows" { return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE")) } return "/etc/cronitor" } func truncateString(s string, length int) string { if len(s) <= length { return s } return s[:length] } func printSuccessText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiGreen) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printDoneText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { printSuccessText(message+" ✔", indent) } } func printWarningText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiYellow) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printErrorText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { red := color.New(color.FgHiRed) if indent { red.Println(fmt.Sprintf(" |--► %s", message)) } else { red.Println(fmt.Sprintf("----► %s", message)) } } } func printLn() { if isAutoDiscover || isSilent { return } fmt.Println() } func isPathToDirectory(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false } return fileInfo.Mode().IsDir() } func log(msg string) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } if verbose { fmt.Println(msg) } } func fatal(msg string, exitCode int) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } fmt.Fprintln(os.Stderr, msg) os.Exit(exitCode) } func makeStamp() float64 { return float64(time.Now().UnixNano()) / float64(time.Second) } func formatStamp(timestamp float64) string { return strconv.FormatFloat(timestamp, 'f', 3, 64) } func shortDescription(version string) string { return fmt.Sprintf("CronitorCLI version %s", version) } func getCronitorApi() *lib.CronitorApi { return &lib.CronitorApi{ IsDev: dev, IsAutoDiscover: isAutoDiscover, ApiKey: varApiKey, UserAgent: userAgent, Logger: log, } }
defaultConfigFileDirectory
identifier_name
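The sendPing loop in the root.go record above retries up to six times, switching hosts after two failed attempts and sleeping for a jittered, roughly linear delay before each later attempt. A minimal standalone sketch of just that delay policy, using the same expression as the loop body (the name retryDelay is illustrative, not from the original):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay reproduces the sleep taken before attempt i in the ping loop:
// no pause for the first two attempts, then a random pause of up to
// i*1.5 whole seconds (the float is truncated to an integer second count).
func retryDelay(i int) time.Duration {
	if i <= 2 {
		return 0
	}
	return time.Second * time.Duration(float32(i)*1.5*rand.Float32())
}

func main() {
	for i := 1; i <= 6; i++ {
		fmt.Printf("attempt %d: would sleep %v\n", i, retryDelay(i))
	}
}
```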
root.go
package cmd import ( "cronitor/lib" "errors" "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "os" "os/exec" "regexp" "runtime" "strconv" "strings" "sync" "time" "github.com/fatih/color" "github.com/getsentry/raven-go" "github.com/spf13/cobra" "github.com/spf13/viper" ) var Version string = "25.2" var cfgFile string var userAgent string // Flags that are either global or used in multiple commands var apiKey string var debugLog string var dev bool var hostname string var pingApiKey string var verbose bool var noStdoutPassthru bool // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "cronitor", Short: shortDescription(Version), Long: shortDescription(Version) + ` Command line tools for Cronitor.io. See https://cronitor.io/docs/using-cronitor-cli for details.`, } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := RootCmd.Execute(); err != nil { fatal(err.Error(), 1) } } var varApiKey = "CRONITOR_API_KEY" var varHostname = "CRONITOR_HOSTNAME" var varLog = "CRONITOR_LOG" var varPingApiKey = "CRONITOR_PING_API_KEY" var varExcludeText = "CRONITOR_EXCLUDE_TEXT" var varConfig = "CRONITOR_CONFIG" func init() { userAgent = fmt.Sprintf("CronitorCLI/%s", Version) cobra.OnInitialize(initConfig) // Here you will define your flags and configuration settings. // Cobra supports persistent flags, which, if defined here, // will be global for your application. RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file") RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key") RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key") RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)") RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file") RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output") RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode") RootCmd.PersistentFlags().MarkHidden("use-dev") viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key")) viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname")) viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log")) viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key")) viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config")) } // initConfig reads in config file and ENV variables if set. func initConfig() { viper.AutomaticEnv() // read in environment variables that match // If a custom config file is specified by flag or env var, use it. Otherwise use default file. if len(viper.GetString(varConfig)) > 0 { viper.SetConfigFile(viper.GetString(varConfig)) } else { viper.AddConfigPath(defaultConfigFileDirectory()) viper.SetConfigName("cronitor") } // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { log("Reading config from " + viper.ConfigFileUsed()) } } func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) { defer group.Done() Client := &http.Client{ Timeout: time.Second * 10, } hostname := effectiveHostname() pingApiAuthKey := viper.GetString(varPingApiKey) pingApiHost := "" formattedStamp := "" formattedDuration := "" formattedStatusCode := "" if timestamp > 0 { formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp)) } if len(message) > 0 { message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000))) } if len(pingApiAuthKey) > 0 { pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50)) } if len(hostname) > 0 { hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50))) } // By passing duration up, we save the computation on the server side if duration != nil { formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration)) } // We aren't using exit code at time of writing, but we have the field available for healthcheck monitors. if exitCode != nil { formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode) } // The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running. if len(series) > 0 { series = fmt.Sprintf("&series=%s", series) } pingSent := false uri := "" for i := 1; i <= 6; i++ { if dev { pingApiHost = "http://dev.cronitor.io" } else if i > 2 && pingApiHost == "https://cronitor.link" { pingApiHost = "https://cronitor.io" } else { pingApiHost = "https://cronitor.link" } // After 2 failed attempts, take a brief random break before trying again if i > 2 { time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32())) } uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode) log("Sending ping " + uri) request, _ := http.NewRequest("GET", uri, nil) request.Header.Add("User-Agent", userAgent) response, err := Client.Do(request) if err != nil { log(err.Error()) continue } _, err = ioutil.ReadAll(response.Body) response.Body.Close() // Any 2xx is considered a successful response if response.StatusCode >= 200 && response.StatusCode < 300 { pingSent = true break } // Backoff on any 4xx request, e.g. 
429 Too Many Requests if response.StatusCode >= 400 && response.StatusCode < 500 { pingSent = true break } } if !pingSent { raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil) } } func effectiveHostname() string { if len(viper.GetString(varHostname)) > 0 { return viper.GetString(varHostname) } hostname, _ := os.Hostname() return hostname } func effectiveTimezoneLocationName() lib.TimezoneLocationName { // First, check if a TZ or CRON_TZ environemnt variable is set -- Diff var used by diff distros if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } // Attempt to parse timedatectl (should work on FreeBSD, many linux distros) if output, err := exec.Command("timedatectl").Output(); err == nil { outputString := strings.Replace(string(output), "Time zone", "Timezone", -1) r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`) if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 { return lib.TimezoneLocationName{ret[1]} } } // If /etc/localtime is a symlink, check what it is linking to if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink { if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 { if strings.Contains(symlink, "UTC") { return lib.TimezoneLocationName{"UTC"} } symlinkParts := strings.Split(symlink, "/") return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")} } } // If we happen to have an /etc/timezone, no guarantee it's used, but read that if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil { return lib.TimezoneLocationName{string(locale)} } return lib.TimezoneLocationName{""} } func defaultConfigFileDirectory() string { if runtime.GOOS == "windows" { return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE")) } return "/etc/cronitor" } func truncateString(s string, length int) string { if len(s) <= length { return s } return s[:length] }
color := color.New(color.FgHiGreen) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printDoneText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { printSuccessText(message+" ✔", indent) } } func printWarningText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiYellow) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printErrorText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { red := color.New(color.FgHiRed) if indent { red.Println(fmt.Sprintf(" |--► %s", message)) } else { red.Println(fmt.Sprintf("----► %s", message)) } } } func printLn() { if isAutoDiscover || isSilent { return } fmt.Println() } func isPathToDirectory(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false } return fileInfo.Mode().IsDir() } func log(msg string) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } if verbose { fmt.Println(msg) } } func fatal(msg string, exitCode int) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } fmt.Fprintln(os.Stderr, msg) os.Exit(exitCode) } func makeStamp() float64 { return float64(time.Now().UnixNano()) / float64(time.Second) } func formatStamp(timestamp float64) string { return strconv.FormatFloat(timestamp, 'f', 3, 64) } func shortDescription(version string) string { return fmt.Sprintf("CronitorCLI version %s", version) } func getCronitorApi() *lib.CronitorApi { return &lib.CronitorApi{ IsDev: dev, IsAutoDiscover: isAutoDiscover, ApiKey: varApiKey, UserAgent: userAgent, Logger: log, } }
func printSuccessText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else {
random_line_split
root.go
package cmd import ( "cronitor/lib" "errors" "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "os" "os/exec" "regexp" "runtime" "strconv" "strings" "sync" "time" "github.com/fatih/color" "github.com/getsentry/raven-go" "github.com/spf13/cobra" "github.com/spf13/viper" ) var Version string = "25.2" var cfgFile string var userAgent string // Flags that are either global or used in multiple commands var apiKey string var debugLog string var dev bool var hostname string var pingApiKey string var verbose bool var noStdoutPassthru bool // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "cronitor", Short: shortDescription(Version), Long: shortDescription(Version) + ` Command line tools for Cronitor.io. See https://cronitor.io/docs/using-cronitor-cli for details.`, } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := RootCmd.Execute(); err != nil { fatal(err.Error(), 1) } } var varApiKey = "CRONITOR_API_KEY" var varHostname = "CRONITOR_HOSTNAME" var varLog = "CRONITOR_LOG" var varPingApiKey = "CRONITOR_PING_API_KEY" var varExcludeText = "CRONITOR_EXCLUDE_TEXT" var varConfig = "CRONITOR_CONFIG" func init() { userAgent = fmt.Sprintf("CronitorCLI/%s", Version) cobra.OnInitialize(initConfig) // Here you will define your flags and configuration settings. // Cobra supports persistent flags, which, if defined here, // will be global for your application. RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file") RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key") RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key") RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)") RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file") RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output") RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode") RootCmd.PersistentFlags().MarkHidden("use-dev") viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key")) viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname")) viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log")) viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key")) viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config")) } // initConfig reads in config file and ENV variables if set. func initConfig() { viper.AutomaticEnv() // read in environment variables that match // If a custom config file is specified by flag or env var, use it. Otherwise use default file. if len(viper.GetString(varConfig)) > 0 { viper.SetConfigFile(viper.GetString(varConfig)) } else { viper.AddConfigPath(defaultConfigFileDirectory()) viper.SetConfigName("cronitor") } // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { log("Reading config from " + viper.ConfigFileUsed()) } } func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) { defer group.Done() Client := &http.Client{ Timeout: time.Second * 10, } hostname := effectiveHostname() pingApiAuthKey := viper.GetString(varPingApiKey) pingApiHost := "" formattedStamp := "" formattedDuration := "" formattedStatusCode := "" if timestamp > 0 { formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp)) } if len(message) > 0 { message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000))) } if len(pingApiAuthKey) > 0 { pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50)) } if len(hostname) > 0 { hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50))) } // By passing duration up, we save the computation on the server side if duration != nil { formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration)) } // We aren't using exit code at time of writing, but we have the field available for healthcheck monitors. if exitCode != nil { formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode) } // The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running. if len(series) > 0 { series = fmt.Sprintf("&series=%s", series) } pingSent := false uri := "" for i := 1; i <= 6; i++ { if dev { pingApiHost = "http://dev.cronitor.io" } else if i > 2 && pingApiHost == "https://cronitor.link" { pingApiHost = "https://cronitor.io" } else { pingApiHost = "https://cronitor.link" } // After 2 failed attempts, take a brief random break before trying again if i > 2 { time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32())) } uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode) log("Sending ping " + uri) request, _ := http.NewRequest("GET", uri, nil) request.Header.Add("User-Agent", userAgent) response, err := Client.Do(request) if err != nil { log(err.Error()) continue } _, err = ioutil.ReadAll(response.Body) response.Body.Close() // Any 2xx is considered a successful response if response.StatusCode >= 200 && response.StatusCode < 300 { pingSent = true break } // Backoff on any 4xx request, e.g. 
429 Too Many Requests if response.StatusCode >= 400 && response.StatusCode < 500 { pingSent = true break } } if !pingSent { raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil) } } func effectiveHostname() string { if len(viper.GetString(varHostname)) > 0 { return viper.GetString(varHostname) } hostname, _ := os.Hostname() return hostname } func effectiveTimezoneLocationName() lib.TimezoneLocationName { // First, check if a TZ or CRON_TZ environemnt variable is set -- Diff var used by diff distros if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } // Attempt to parse timedatectl (should work on FreeBSD, many linux distros) if output, err := exec.Command("timedatectl").Output(); err == nil { outputString := strings.Replace(string(output), "Time zone", "Timezone", -1) r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`) if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 { return lib.TimezoneLocationName{ret[1]} } } // If /etc/localtime is a symlink, check what it is linking to if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink { if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 { if strings.Contains(symlink, "UTC") { return lib.TimezoneLocationName{"UTC"} } symlinkParts := strings.Split(symlink, "/") return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")} } } // If we happen to have an /etc/timezone, no guarantee it's used, but read that if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil { return lib.TimezoneLocationName{string(locale)} } return lib.TimezoneLocationName{""} } func defaultConfigFileDirectory() string { if runtime.GOOS == "windows" { return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE")) } return "/etc/cronitor" } func truncateString(s string, length int) string { if len(s) <= length { return s } return s[:length] } func printSuccessText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiGreen) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printDoneText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { printSuccessText(message+" ✔", indent) } } func printWarningText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiYellow) if indent { c
color.Println(fmt.Sprintf("----► %s", message)) } } } func printErrorText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { red := color.New(color.FgHiRed) if indent { red.Println(fmt.Sprintf(" |--► %s", message)) } else { red.Println(fmt.Sprintf("----► %s", message)) } } } func printLn() { if isAutoDiscover || isSilent { return } fmt.Println() } func isPathToDirectory(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false } return fileInfo.Mode().IsDir() } func log(msg string) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } if verbose { fmt.Println(msg) } } func fatal(msg string, exitCode int) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } fmt.Fprintln(os.Stderr, msg) os.Exit(exitCode) } func makeStamp() float64 { return float64(time.Now().UnixNano()) / float64(time.Second) } func formatStamp(timestamp float64) string { return strconv.FormatFloat(timestamp, 'f', 3, 64) } func shortDescription(version string) string { return fmt.Sprintf("CronitorCLI version %s", version) } func getCronitorApi() *lib.CronitorApi { return &lib.CronitorApi{ IsDev: dev, IsAutoDiscover: isAutoDiscover, ApiKey: varApiKey, UserAgent: userAgent, Logger: log, } }
olor.Println(fmt.Sprintf(" |--► %s", message)) } else {
conditional_block
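The stamp helpers appearing in this record encode Unix time as seconds with three decimal places, and the same formatter is reused for the &duration= parameter in sendPing. A small self-contained sketch of that roundtrip, with makeStamp and formatStamp copied from the record (the main function is only for illustration):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// makeStamp and formatStamp mirror the helpers above: a Unix timestamp in
// seconds (keeping sub-second precision from UnixNano) and its fixed
// three-decimal rendering used for the &stamp= and &duration= parameters.
func makeStamp() float64 {
	return float64(time.Now().UnixNano()) / float64(time.Second)
}

func formatStamp(ts float64) string {
	return strconv.FormatFloat(ts, 'f', 3, 64)
}

func main() {
	start := makeStamp()
	time.Sleep(25 * time.Millisecond)
	end := makeStamp()

	fmt.Println("stamp:   ", formatStamp(end))       // e.g. 1700000000.025
	fmt.Println("duration:", formatStamp(end-start)) // roughly 0.025
}
```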
root.go
package cmd import ( "cronitor/lib" "errors" "fmt" "io/ioutil" "math/rand" "net/http" "net/url" "os" "os/exec" "regexp" "runtime" "strconv" "strings" "sync" "time" "github.com/fatih/color" "github.com/getsentry/raven-go" "github.com/spf13/cobra" "github.com/spf13/viper" ) var Version string = "25.2" var cfgFile string var userAgent string // Flags that are either global or used in multiple commands var apiKey string var debugLog string var dev bool var hostname string var pingApiKey string var verbose bool var noStdoutPassthru bool // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ Use: "cronitor", Short: shortDescription(Version), Long: shortDescription(Version) + ` Command line tools for Cronitor.io. See https://cronitor.io/docs/using-cronitor-cli for details.`, } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := RootCmd.Execute(); err != nil { fatal(err.Error(), 1) } } var varApiKey = "CRONITOR_API_KEY" var varHostname = "CRONITOR_HOSTNAME" var varLog = "CRONITOR_LOG" var varPingApiKey = "CRONITOR_PING_API_KEY" var varExcludeText = "CRONITOR_EXCLUDE_TEXT" var varConfig = "CRONITOR_CONFIG" func init() { userAgent = fmt.Sprintf("CronitorCLI/%s", Version) cobra.OnInitialize(initConfig) // Here you will define your flags and configuration settings. // Cobra supports persistent flags, which, if defined here, // will be global for your application. RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", cfgFile, "Config file") RootCmd.PersistentFlags().StringVarP(&apiKey, "api-key", "k", apiKey, "Cronitor API Key") RootCmd.PersistentFlags().StringVarP(&pingApiKey, "ping-api-key", "p", pingApiKey, "Ping API Key") RootCmd.PersistentFlags().StringVarP(&hostname, "hostname", "n", hostname, "A unique identifier for this host (default: system hostname)") RootCmd.PersistentFlags().StringVarP(&debugLog, "log", "l", debugLog, "Write debug logs to supplied file") RootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", verbose, "Verbose output") RootCmd.PersistentFlags().BoolVar(&dev, "use-dev", dev, "Dev mode") RootCmd.PersistentFlags().MarkHidden("use-dev") viper.BindPFlag(varApiKey, RootCmd.PersistentFlags().Lookup("api-key")) viper.BindPFlag(varHostname, RootCmd.PersistentFlags().Lookup("hostname")) viper.BindPFlag(varLog, RootCmd.PersistentFlags().Lookup("log")) viper.BindPFlag(varPingApiKey, RootCmd.PersistentFlags().Lookup("ping-api-key")) viper.BindPFlag(varConfig, RootCmd.PersistentFlags().Lookup("config")) } // initConfig reads in config file and ENV variables if set. func initConfig() { viper.AutomaticEnv() // read in environment variables that match // If a custom config file is specified by flag or env var, use it. Otherwise use default file. if len(viper.GetString(varConfig)) > 0 { viper.SetConfigFile(viper.GetString(varConfig)) } else { viper.AddConfigPath(defaultConfigFileDirectory()) viper.SetConfigName("cronitor") } // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { log("Reading config from " + viper.ConfigFileUsed()) } } func sendPing(endpoint string, uniqueIdentifier string, message string, series string, timestamp float64, duration *float64, exitCode *int, group *sync.WaitGroup) { defer group.Done() Client := &http.Client{ Timeout: time.Second * 10, } hostname := effectiveHostname() pingApiAuthKey := viper.GetString(varPingApiKey) pingApiHost := "" formattedStamp := "" formattedDuration := "" formattedStatusCode := "" if timestamp > 0 { formattedStamp = fmt.Sprintf("&stamp=%s", formatStamp(timestamp)) } if len(message) > 0 { message = fmt.Sprintf("&msg=%s", url.QueryEscape(truncateString(message, 1000))) } if len(pingApiAuthKey) > 0 { pingApiAuthKey = fmt.Sprintf("&auth_key=%s", truncateString(pingApiAuthKey, 50)) } if len(hostname) > 0 { hostname = fmt.Sprintf("&host=%s", url.QueryEscape(truncateString(hostname, 50))) } // By passing duration up, we save the computation on the server side if duration != nil { formattedDuration = fmt.Sprintf("&duration=%s", formatStamp(*duration)) } // We aren't using exit code at time of writing, but we have the field available for healthcheck monitors. if exitCode != nil { formattedStatusCode = fmt.Sprintf("&status_code=%d", *exitCode) } // The `series` data is used to match run events with complete or fail. Useful if multiple instances of a job are running. if len(series) > 0 { series = fmt.Sprintf("&series=%s", series) } pingSent := false uri := "" for i := 1; i <= 6; i++ { if dev { pingApiHost = "http://dev.cronitor.io" } else if i > 2 && pingApiHost == "https://cronitor.link" { pingApiHost = "https://cronitor.io" } else { pingApiHost = "https://cronitor.link" } // After 2 failed attempts, take a brief random break before trying again if i > 2 { time.Sleep(time.Second * time.Duration(float32(i)*1.5*rand.Float32())) } uri = fmt.Sprintf("%s/%s/%s?try=%d%s%s%s%s%s%s%s", pingApiHost, uniqueIdentifier, endpoint, i, formattedStamp, message, pingApiAuthKey, hostname, formattedDuration, series, formattedStatusCode) log("Sending ping " + uri) request, _ := http.NewRequest("GET", uri, nil) request.Header.Add("User-Agent", userAgent) response, err := Client.Do(request) if err != nil { log(err.Error()) continue } _, err = ioutil.ReadAll(response.Body) response.Body.Close() // Any 2xx is considered a successful response if response.StatusCode >= 200 && response.StatusCode < 300 { pingSent = true break } // Backoff on any 4xx request, e.g. 
429 Too Many Requests if response.StatusCode >= 400 && response.StatusCode < 500 { pingSent = true break } } if !pingSent { raven.CaptureErrorAndWait(errors.New("Ping failure; retries exhausted: "+uri), nil) } } func effectiveHostname() string { if len(viper.GetString(varHostname)) > 0 { return viper.GetString(varHostname) } hostname, _ := os.Hostname() return hostname } func effectiveTimezoneLocationName() lib.TimezoneLocationName { // First, check if a TZ or CRON_TZ environemnt variable is set -- Diff var used by diff distros if locale, isSetFlag := os.LookupEnv("TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } if locale, isSetFlag := os.LookupEnv("CRON_TZ"); isSetFlag { return lib.TimezoneLocationName{locale} } // Attempt to parse timedatectl (should work on FreeBSD, many linux distros) if output, err := exec.Command("timedatectl").Output(); err == nil { outputString := strings.Replace(string(output), "Time zone", "Timezone", -1) r := regexp.MustCompile(`(?m:Timezone:\s+(\S+).+$)`) if ret := r.FindStringSubmatch(outputString); ret != nil && len(ret) > 1 { return lib.TimezoneLocationName{ret[1]} } } // If /etc/localtime is a symlink, check what it is linking to if localtimeFile, err := os.Lstat("/etc/localtime"); err == nil && localtimeFile.Mode()&os.ModeSymlink == os.ModeSymlink { if symlink, _ := os.Readlink("/etc/localtime"); len(symlink) > 0 { if strings.Contains(symlink, "UTC") { return lib.TimezoneLocationName{"UTC"} } symlinkParts := strings.Split(symlink, "/") return lib.TimezoneLocationName{strings.Join(symlinkParts[len(symlinkParts)-2:], "/")} } } // If we happen to have an /etc/timezone, no guarantee it's used, but read that if locale, err := ioutil.ReadFile("/etc/timezone"); err == nil { return lib.TimezoneLocationName{string(locale)} } return lib.TimezoneLocationName{""} } func defaultConfigFileDirectory() string { if runtime.GOOS == "windows" { return fmt.Sprintf("%s\\ProgramData\\Cronitor", os.Getenv("SYSTEMDRIVE")) } return "/etc/cronitor" } func truncateString(s string, length int) string
func printSuccessText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiGreen) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printDoneText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { printSuccessText(message+" ✔", indent) } } func printWarningText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { color := color.New(color.FgHiYellow) if indent { color.Println(fmt.Sprintf(" |--► %s", message)) } else { color.Println(fmt.Sprintf("----► %s", message)) } } } func printErrorText(message string, indent bool) { if isAutoDiscover || isSilent { log(message) } else { red := color.New(color.FgHiRed) if indent { red.Println(fmt.Sprintf(" |--► %s", message)) } else { red.Println(fmt.Sprintf("----► %s", message)) } } } func printLn() { if isAutoDiscover || isSilent { return } fmt.Println() } func isPathToDirectory(path string) bool { fileInfo, err := os.Stat(path) if err != nil { return false } return fileInfo.Mode().IsDir() } func log(msg string) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } if verbose { fmt.Println(msg) } } func fatal(msg string, exitCode int) { debugLog := viper.GetString(varLog) if len(debugLog) > 0 { f, _ := os.OpenFile(debugLog, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) defer f.Close() f.WriteString(msg + "\n") } fmt.Fprintln(os.Stderr, msg) os.Exit(exitCode) } func makeStamp() float64 { return float64(time.Now().UnixNano()) / float64(time.Second) } func formatStamp(timestamp float64) string { return strconv.FormatFloat(timestamp, 'f', 3, 64) } func shortDescription(version string) string { return fmt.Sprintf("CronitorCLI version %s", version) } func getCronitorApi() *lib.CronitorApi { return &lib.CronitorApi{ IsDev: dev, IsAutoDiscover: isAutoDiscover, ApiKey: varApiKey, UserAgent: userAgent, Logger: log, } }
{ if len(s) <= length { return s } return s[:length] }
identifier_body
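The truncateString body reconstructed in this record slices by bytes, which is exactly what the surrounding code relies on; if a caller ever needed the truncated ping message to remain valid UTF-8, a rune-aware variant would be a small change. A hedged sketch assuming that requirement (truncateRunes is hypothetical, not part of the original):

```go
package main

import "fmt"

// truncateString is the byte-based helper from the record above.
func truncateString(s string, length int) string {
	if len(s) <= length {
		return s
	}
	return s[:length]
}

// truncateRunes is a hypothetical rune-aware alternative: it counts
// characters instead of bytes, so it never cuts a UTF-8 sequence in half.
func truncateRunes(s string, length int) string {
	r := []rune(s)
	if len(r) <= length {
		return s
	}
	return string(r[:length])
}

func main() {
	msg := "ping ✔ done"
	fmt.Println(truncateString(msg, 6)) // may end mid-rune
	fmt.Println(truncateRunes(msg, 6))  // "ping ✔"
}
```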
browser.rs
use std::time::Duration; use std::{ collections::HashMap, io::{self, BufRead, BufReader}, path::{Path, PathBuf}, process::{self, Child, Stdio}, }; use futures::channel::mpsc::{channel, unbounded, Sender}; use futures::channel::oneshot::channel as oneshot_channel; use futures::SinkExt; use chromiumoxide_cdp::cdp::browser_protocol::target::{ CreateBrowserContextParams, CreateTargetParams, DisposeBrowserContextParams, TargetId, }; use chromiumoxide_cdp::cdp::{CdpEventMessage, IntoEventKind}; use chromiumoxide_types::*; use crate::cmd::{to_command_response, CommandMessage}; use crate::conn::Connection; use crate::error::{CdpError, Result}; use crate::handler::browser::BrowserContext; use crate::handler::viewport::Viewport; use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT}; use crate::listeners::{EventListenerRequest, EventStream}; use crate::page::Page; use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns}; /// A [`Browser`] is created when chromiumoxide connects to a Chromium instance. #[derive(Debug)] pub struct Browser { /// The `Sender` to send messages to the connection handler that drives the /// websocket sender: Sender<HandlerMessage>, /// How the spawned chromium instance was configured, if any config: Option<BrowserConfig>, /// The spawned chromium instance child: Option<Child>, /// The debug web socket url of the chromium instance debug_ws_url: String, /// The context of the browser browser_context: BrowserContext, } impl Browser { /// Connect to an already running chromium instance via websocket pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> { let debug_ws_url = debug_ws_url.into(); let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let fut = Handler::new(conn, rx, HandlerConfig::default()); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: None, child: None, debug_ws_url, browser_context, }; Ok((browser, fut)) } /// Launches a new instance of `chromium` in the background and attaches to /// its debug web socket. /// /// This fails when no chromium executable could be detected. /// /// This fails if no web socket url could be detected from the child /// processes stderr for more than 20 seconds. pub async fn launch(config: BrowserConfig) -> Result<(Self, Handler)> { // launch a new chromium instance let mut child = config.launch()?; // extract the ws: let get_ws_url = ws_url_from_output(&mut child); let dur = Duration::from_secs(20); cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { let debug_ws_url = async_std::future::timeout(dur, get_ws_url) .await .map_err(|_| CdpError::Timeout)?; } else if #[cfg(feature = "tokio-runtime")] { let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await .map_err(|_| CdpError::Timeout)?; } } let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let handler_config = HandlerConfig { ignore_https_errors: config.ignore_https_errors, viewport: Some(config.viewport.clone()), context_ids: Vec::new(), request_timeout: config.request_timeout, }; let fut = Handler::new(conn, rx, handler_config); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: Some(config), child: Some(child), debug_ws_url, browser_context, }; Ok((browser, fut)) } /// If not launched as incognito this creates a new incognito browser /// context. 
After that this browser exists within the incognito session. /// New pages created while being in incognito mode will also run in the /// incognito context. Incognito contexts won't share cookies/cache with /// other browser contexts. pub async fn start_incognito_context(&mut self) -> Result<&mut Self> { if !self.is_incognito_configured() { let resp = self .execute(CreateBrowserContextParams::default()) .await? .result; self.browser_context = BrowserContext::from(resp.browser_context_id); self.sender .clone() .send(HandlerMessage::InsertContext(self.browser_context.clone())) .await?; } Ok(self) } /// If a incognito session was created with /// `Browser::start_incognito_context` this disposes this context. /// /// # Note This will also dispose all pages that were running within the /// incognito context. pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> { if let Some(id) = self.browser_context.take() { self.execute(DisposeBrowserContextParams::new(id.clone())) .await?; self.sender .clone() .send(HandlerMessage::DisposeContext(BrowserContext::from(id))) .await?; } Ok(self) } /// Whether incognito mode was configured from the start fn is_incognito_configured(&self) -> bool { self.config .as_ref() .map(|c| c.incognito) .unwrap_or_default() } /// Returns the address of the websocket this browser is attached to pub fn websocket_address(&self) -> &String { &self.debug_ws_url } /// Whether the BrowserContext is incognito. pub fn is_incognito(&self) -> bool { self.is_incognito_configured() || self.browser_context.is_incognito() } /// The config of the spawned chromium instance if any. pub fn config(&self) -> Option<&BrowserConfig> { self.config.as_ref() } /// Create a new browser page pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> { let (tx, rx) = oneshot_channel(); let mut params = params.into(); if let Some(id) = self.browser_context.id() { if params.browser_context_id.is_none() { params.browser_context_id = Some(id.clone()); } } self.sender .clone() .send(HandlerMessage::CreatePage(params, tx)) .await?; rx.await? } /// Version information about the browser pub async fn version(&self) -> Result<GetVersionReturns> { Ok(self.execute(GetVersionParams::default()).await?.result) } /// Returns the user agent of the browser pub async fn user_agent(&self) -> Result<String> { Ok(self.version().await?.user_agent) } /// Call a browser method. pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> { let (tx, rx) = oneshot_channel(); let method = cmd.identifier(); let msg = CommandMessage::new(cmd, tx)?; self.sender .clone() .send(HandlerMessage::Command(msg)) .await?; let resp = rx.await??; to_command_response::<T>(resp, method) } /// Return all of the pages of the browser pub async fn pages(&self) -> Result<Vec<Page>> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPages(tx)) .await?; Ok(rx.await?) 
} /// Return page of given target_id pub async fn get_page(&self, target_id: TargetId) -> Result<Page> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPage(target_id, tx)) .await?; rx.await?.ok_or(CdpError::NotFound) } //Set listener for browser event pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> { let (tx, rx) = unbounded(); self.sender .clone() .send(HandlerMessage::AddEventListener( EventListenerRequest::new::<T>(tx), )) .await?; Ok(EventStream::new(rx)) } } impl Drop for Browser { fn drop(&mut self) { if let Some(child) = self.child.as_mut() { child.kill().expect("!kill"); } } } async fn ws_url_from_output(child_process: &mut Child) -> String { let stdout = child_process.stderr.take().expect("no stderror"); fn read_debug_url(stdout: std::process::ChildStderr) -> String { let mut buf = BufReader::new(stdout); let mut line = String::new(); loop { if buf.read_line(&mut line).is_ok() { // check for ws in line if let Some(ws) = line.rsplit("listening on ").next() { if ws.starts_with("ws") && ws.contains("devtools/browser") { return ws.trim().to_string(); } } } else { line = String::new(); } } } cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { async_std::task::spawn_blocking(|| read_debug_url(stdout)).await } else if #[cfg(feature = "tokio-runtime")] { tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output") } } } #[derive(Debug, Clone)] pub struct BrowserConfig { /// Determines whether to run headless version of the browser. Defaults to /// true. headless: bool, /// Determines whether to run the browser with a sandbox. sandbox: bool, /// Launch the browser with a specific window width and height. window_size: Option<(u32, u32)>, /// Launch the browser with a specific debugging port. port: u16, /// Path for Chrome or Chromium. /// /// If unspecified, the create will try to automatically detect a suitable /// binary. executable: std::path::PathBuf, /// A list of Chrome extensions to load. /// /// An extension should be a path to a folder containing the extension code. /// CRX files cannot be used directly and must be first extracted. /// /// Note that Chrome does not support loading extensions in headless-mode. /// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5 extensions: Vec<String>, /// Environment variables to set for the Chromium process. /// Passes value through to std::process::Command::envs. pub process_envs: Option<HashMap<String, String>>, /// Data dir for user data pub user_data_dir: Option<PathBuf>, /// Whether to launch the `Browser` in incognito mode incognito: bool, /// Ignore https errors, default is true ignore_https_errors: bool, viewport: Viewport, /// The duration after a request with no response should time out request_timeout: Duration, /// Additional command line arguments to pass to the browser instance. 
args: Vec<String>, } #[derive(Debug, Clone)] pub struct BrowserConfigBuilder { headless: bool, sandbox: bool, window_size: Option<(u32, u32)>, port: u16, executable: Option<PathBuf>, extensions: Vec<String>, process_envs: Option<HashMap<String, String>>, user_data_dir: Option<PathBuf>, incognito: bool, ignore_https_errors: bool, viewport: Viewport, request_timeout: Duration, args: Vec<String>, } impl BrowserConfig { pub fn builder() -> BrowserConfigBuilder { BrowserConfigBuilder::default() } pub fn with_executable(path: impl AsRef<Path>) -> Self { Self::builder().chrome_executable(path).build().unwrap() } } impl Default for BrowserConfigBuilder { fn default() -> Self { Self { headless: true, sandbox: true, window_size: None, port: 0, executable: None, extensions: Vec::new(), process_envs: None, user_data_dir: None, incognito: false, ignore_https_errors: true, viewport: Default::default(), request_timeout: Duration::from_millis(REQUEST_TIMEOUT), args: Vec::new(), } } } impl BrowserConfigBuilder { pub fn window_size(mut self, width: u32, height: u32) -> Self { self.window_size = Some((width, height)); self } pub fn no_sandbox(mut self) -> Self { self.sandbox = false; self } pub fn with_head(mut self) -> Self { self.headless = false; self } pub fn incognito(mut self) -> Self { self.incognito = true; self } pub fn respect_https_errors(mut self) -> Self { self.ignore_https_errors = false; self } pub fn request_timeout(mut self, timeout: Duration) -> Self { self.request_timeout = timeout; self } pub fn viewport(mut self, viewport: Viewport) -> Self { self.viewport = viewport; self } pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self { self.user_data_dir = Some(data_dir.as_ref().to_path_buf()); self } pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self { self.executable = Some(path.as_ref().to_path_buf()); self } pub fn extension(mut self, extension: impl Into<String>) -> Self { self.extensions.push(extension.into()); self } pub fn extensions<I, S>(mut self, extensions: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for ext in extensions { self.extensions.push(ext.into()); } self } pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self { self.process_envs .get_or_insert(HashMap::new()) .insert(key.into(), val.into()); self } pub fn envs<I, K, V>(mut self, envs: I) -> Self where I: IntoIterator<Item = (K, V)>, K: Into<String>, V: Into<String>, { self.process_envs .get_or_insert(HashMap::new()) .extend(envs.into_iter().map(|(k, v)| (k.into(), v.into()))); self } pub fn arg(mut self, arg: impl Into<String>) -> Self { self.args.push(arg.into()); self } pub fn args<I, S>(mut self, args: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for arg in args {
} self } pub fn build(self) -> std::result::Result<BrowserConfig, String> { let executable = if let Some(e) = self.executable { e } else { default_executable()? }; Ok(BrowserConfig { headless: self.headless, sandbox: self.sandbox, window_size: self.window_size, port: self.port, executable, extensions: self.extensions, process_envs: self.process_envs, user_data_dir: self.user_data_dir, incognito: self.incognito, ignore_https_errors: self.ignore_https_errors, viewport: self.viewport, request_timeout: self.request_timeout, args: self.args, }) } } impl BrowserConfig { pub fn launch(&self) -> io::Result<Child> { let dbg_port = format!("--remote-debugging-port={}", self.port); let args = [ dbg_port.as_str(), "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", "--enable-blink-features=IdleDetection", ]; let mut cmd = process::Command::new(&self.executable); cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args( self.extensions .iter() .map(|e| format!("--load-extension={}", e)), ); if let Some(ref user_data) = self.user_data_dir { cmd.arg(format!("--user-data-dir={}", user_data.display())); } if let Some((width, height)) = self.window_size { cmd.arg(format!("--window-size={},{}", width, height)); } if !self.sandbox { cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]); } if self.headless { cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]); } if self.incognito { cmd.arg("--incognito"); } if let Some(ref envs) = self.process_envs { cmd.envs(envs); } cmd.stderr(Stdio::piped()).spawn() } } /// Returns the path to Chrome's executable. /// /// If the `CHROME` environment variable is set, `default_executable` will /// use it as the default path. Otherwise, the filenames `google-chrome-stable` /// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are /// searched for in standard places. If that fails, /// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on /// Windows) is consulted. If all of the above fail, an error is returned. 
pub fn default_executable() -> Result<std::path::PathBuf, String> { if let Ok(path) = std::env::var("CHROME") { if std::path::Path::new(&path).exists() { return Ok(path.into()); } } for app in &[ "google-chrome-stable", "chromium", "chromium-browser", "chrome", "chrome-browser", ] { if let Ok(path) = which::which(app) { return Ok(path); } } #[cfg(target_os = "macos")] { let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..]; for path in default_paths { if std::path::Path::new(path).exists() { return Ok(path.into()); } } } #[cfg(windows)] { if let Some(path) = get_chrome_path_from_windows_registry() { if path.exists() { return Ok(path); } } } Err("Could not auto detect a chrome executable".to_string()) } #[cfg(windows)] pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> { winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE) .open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe") .and_then(|key| key.get_value::<String, _>("")) .map(std::path::PathBuf::from) .ok() } /// These are passed to the Chrome binary by default. /// Via https://github.com/puppeteer/puppeteer/blob/4846b8723cf20d3551c0d755df394cc5e0c82a94/src/node/Launcher.ts#L157 static DEFAULT_ARGS: [&str; 23] = [ "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", ];
self.args.push(arg.into());
random_line_split
browser.rs
use std::time::Duration; use std::{ collections::HashMap, io::{self, BufRead, BufReader}, path::{Path, PathBuf}, process::{self, Child, Stdio}, }; use futures::channel::mpsc::{channel, unbounded, Sender}; use futures::channel::oneshot::channel as oneshot_channel; use futures::SinkExt; use chromiumoxide_cdp::cdp::browser_protocol::target::{ CreateBrowserContextParams, CreateTargetParams, DisposeBrowserContextParams, TargetId, }; use chromiumoxide_cdp::cdp::{CdpEventMessage, IntoEventKind}; use chromiumoxide_types::*; use crate::cmd::{to_command_response, CommandMessage}; use crate::conn::Connection; use crate::error::{CdpError, Result}; use crate::handler::browser::BrowserContext; use crate::handler::viewport::Viewport; use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT}; use crate::listeners::{EventListenerRequest, EventStream}; use crate::page::Page; use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns}; /// A [`Browser`] is created when chromiumoxide connects to a Chromium instance. #[derive(Debug)] pub struct Browser { /// The `Sender` to send messages to the connection handler that drives the /// websocket sender: Sender<HandlerMessage>, /// How the spawned chromium instance was configured, if any config: Option<BrowserConfig>, /// The spawned chromium instance child: Option<Child>, /// The debug web socket url of the chromium instance debug_ws_url: String, /// The context of the browser browser_context: BrowserContext, } impl Browser { /// Connect to an already running chromium instance via websocket pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> { let debug_ws_url = debug_ws_url.into(); let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let fut = Handler::new(conn, rx, HandlerConfig::default()); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: None, child: None, debug_ws_url, browser_context, }; Ok((browser, fut)) } /// Launches a new instance of `chromium` in the background and attaches to /// its debug web socket. /// /// This fails when no chromium executable could be detected. /// /// This fails if no web socket url could be detected from the child /// processes stderr for more than 20 seconds. pub async fn launch(config: BrowserConfig) -> Result<(Self, Handler)> { // launch a new chromium instance let mut child = config.launch()?; // extract the ws: let get_ws_url = ws_url_from_output(&mut child); let dur = Duration::from_secs(20); cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { let debug_ws_url = async_std::future::timeout(dur, get_ws_url) .await .map_err(|_| CdpError::Timeout)?; } else if #[cfg(feature = "tokio-runtime")] { let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await .map_err(|_| CdpError::Timeout)?; } } let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let handler_config = HandlerConfig { ignore_https_errors: config.ignore_https_errors, viewport: Some(config.viewport.clone()), context_ids: Vec::new(), request_timeout: config.request_timeout, }; let fut = Handler::new(conn, rx, handler_config); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: Some(config), child: Some(child), debug_ws_url, browser_context, }; Ok((browser, fut)) } /// If not launched as incognito this creates a new incognito browser /// context. 
After that this browser exists within the incognito session. /// New pages created while being in incognito mode will also run in the /// incognito context. Incognito contexts won't share cookies/cache with /// other browser contexts. pub async fn start_incognito_context(&mut self) -> Result<&mut Self> { if !self.is_incognito_configured() { let resp = self .execute(CreateBrowserContextParams::default()) .await? .result; self.browser_context = BrowserContext::from(resp.browser_context_id); self.sender .clone() .send(HandlerMessage::InsertContext(self.browser_context.clone())) .await?; } Ok(self) } /// If a incognito session was created with /// `Browser::start_incognito_context` this disposes this context. /// /// # Note This will also dispose all pages that were running within the /// incognito context. pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> { if let Some(id) = self.browser_context.take() { self.execute(DisposeBrowserContextParams::new(id.clone())) .await?; self.sender .clone() .send(HandlerMessage::DisposeContext(BrowserContext::from(id))) .await?; } Ok(self) } /// Whether incognito mode was configured from the start fn is_incognito_configured(&self) -> bool { self.config .as_ref() .map(|c| c.incognito) .unwrap_or_default() } /// Returns the address of the websocket this browser is attached to pub fn websocket_address(&self) -> &String { &self.debug_ws_url } /// Whether the BrowserContext is incognito. pub fn is_incognito(&self) -> bool { self.is_incognito_configured() || self.browser_context.is_incognito() } /// The config of the spawned chromium instance if any. pub fn config(&self) -> Option<&BrowserConfig> { self.config.as_ref() } /// Create a new browser page pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> { let (tx, rx) = oneshot_channel(); let mut params = params.into(); if let Some(id) = self.browser_context.id() { if params.browser_context_id.is_none() { params.browser_context_id = Some(id.clone()); } } self.sender .clone() .send(HandlerMessage::CreatePage(params, tx)) .await?; rx.await? } /// Version information about the browser pub async fn version(&self) -> Result<GetVersionReturns> { Ok(self.execute(GetVersionParams::default()).await?.result) } /// Returns the user agent of the browser pub async fn user_agent(&self) -> Result<String> { Ok(self.version().await?.user_agent) } /// Call a browser method. pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> { let (tx, rx) = oneshot_channel(); let method = cmd.identifier(); let msg = CommandMessage::new(cmd, tx)?; self.sender .clone() .send(HandlerMessage::Command(msg)) .await?; let resp = rx.await??; to_command_response::<T>(resp, method) } /// Return all of the pages of the browser pub async fn pages(&self) -> Result<Vec<Page>> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPages(tx)) .await?; Ok(rx.await?) 
} /// Return page of given target_id pub async fn get_page(&self, target_id: TargetId) -> Result<Page> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPage(target_id, tx)) .await?; rx.await?.ok_or(CdpError::NotFound) } //Set listener for browser event pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> { let (tx, rx) = unbounded(); self.sender .clone() .send(HandlerMessage::AddEventListener( EventListenerRequest::new::<T>(tx), )) .await?; Ok(EventStream::new(rx)) } } impl Drop for Browser { fn drop(&mut self) { if let Some(child) = self.child.as_mut() { child.kill().expect("!kill"); } } } async fn ws_url_from_output(child_process: &mut Child) -> String { let stdout = child_process.stderr.take().expect("no stderror"); fn read_debug_url(stdout: std::process::ChildStderr) -> String { let mut buf = BufReader::new(stdout); let mut line = String::new(); loop { if buf.read_line(&mut line).is_ok() { // check for ws in line if let Some(ws) = line.rsplit("listening on ").next() { if ws.starts_with("ws") && ws.contains("devtools/browser")
} } else { line = String::new(); } } } cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { async_std::task::spawn_blocking(|| read_debug_url(stdout)).await } else if #[cfg(feature = "tokio-runtime")] { tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output") } } } #[derive(Debug, Clone)] pub struct BrowserConfig { /// Determines whether to run headless version of the browser. Defaults to /// true. headless: bool, /// Determines whether to run the browser with a sandbox. sandbox: bool, /// Launch the browser with a specific window width and height. window_size: Option<(u32, u32)>, /// Launch the browser with a specific debugging port. port: u16, /// Path for Chrome or Chromium. /// /// If unspecified, the create will try to automatically detect a suitable /// binary. executable: std::path::PathBuf, /// A list of Chrome extensions to load. /// /// An extension should be a path to a folder containing the extension code. /// CRX files cannot be used directly and must be first extracted. /// /// Note that Chrome does not support loading extensions in headless-mode. /// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5 extensions: Vec<String>, /// Environment variables to set for the Chromium process. /// Passes value through to std::process::Command::envs. pub process_envs: Option<HashMap<String, String>>, /// Data dir for user data pub user_data_dir: Option<PathBuf>, /// Whether to launch the `Browser` in incognito mode incognito: bool, /// Ignore https errors, default is true ignore_https_errors: bool, viewport: Viewport, /// The duration after a request with no response should time out request_timeout: Duration, /// Additional command line arguments to pass to the browser instance. 
args: Vec<String>, } #[derive(Debug, Clone)] pub struct BrowserConfigBuilder { headless: bool, sandbox: bool, window_size: Option<(u32, u32)>, port: u16, executable: Option<PathBuf>, extensions: Vec<String>, process_envs: Option<HashMap<String, String>>, user_data_dir: Option<PathBuf>, incognito: bool, ignore_https_errors: bool, viewport: Viewport, request_timeout: Duration, args: Vec<String>, } impl BrowserConfig { pub fn builder() -> BrowserConfigBuilder { BrowserConfigBuilder::default() } pub fn with_executable(path: impl AsRef<Path>) -> Self { Self::builder().chrome_executable(path).build().unwrap() } } impl Default for BrowserConfigBuilder { fn default() -> Self { Self { headless: true, sandbox: true, window_size: None, port: 0, executable: None, extensions: Vec::new(), process_envs: None, user_data_dir: None, incognito: false, ignore_https_errors: true, viewport: Default::default(), request_timeout: Duration::from_millis(REQUEST_TIMEOUT), args: Vec::new(), } } } impl BrowserConfigBuilder { pub fn window_size(mut self, width: u32, height: u32) -> Self { self.window_size = Some((width, height)); self } pub fn no_sandbox(mut self) -> Self { self.sandbox = false; self } pub fn with_head(mut self) -> Self { self.headless = false; self } pub fn incognito(mut self) -> Self { self.incognito = true; self } pub fn respect_https_errors(mut self) -> Self { self.ignore_https_errors = false; self } pub fn request_timeout(mut self, timeout: Duration) -> Self { self.request_timeout = timeout; self } pub fn viewport(mut self, viewport: Viewport) -> Self { self.viewport = viewport; self } pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self { self.user_data_dir = Some(data_dir.as_ref().to_path_buf()); self } pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self { self.executable = Some(path.as_ref().to_path_buf()); self } pub fn extension(mut self, extension: impl Into<String>) -> Self { self.extensions.push(extension.into()); self } pub fn extensions<I, S>(mut self, extensions: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for ext in extensions { self.extensions.push(ext.into()); } self } pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self { self.process_envs .get_or_insert(HashMap::new()) .insert(key.into(), val.into()); self } pub fn envs<I, K, V>(mut self, envs: I) -> Self where I: IntoIterator<Item = (K, V)>, K: Into<String>, V: Into<String>, { self.process_envs .get_or_insert(HashMap::new()) .extend(envs.into_iter().map(|(k, v)| (k.into(), v.into()))); self } pub fn arg(mut self, arg: impl Into<String>) -> Self { self.args.push(arg.into()); self } pub fn args<I, S>(mut self, args: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for arg in args { self.args.push(arg.into()); } self } pub fn build(self) -> std::result::Result<BrowserConfig, String> { let executable = if let Some(e) = self.executable { e } else { default_executable()? 
}; Ok(BrowserConfig { headless: self.headless, sandbox: self.sandbox, window_size: self.window_size, port: self.port, executable, extensions: self.extensions, process_envs: self.process_envs, user_data_dir: self.user_data_dir, incognito: self.incognito, ignore_https_errors: self.ignore_https_errors, viewport: self.viewport, request_timeout: self.request_timeout, args: self.args, }) } } impl BrowserConfig { pub fn launch(&self) -> io::Result<Child> { let dbg_port = format!("--remote-debugging-port={}", self.port); let args = [ dbg_port.as_str(), "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", "--enable-blink-features=IdleDetection", ]; let mut cmd = process::Command::new(&self.executable); cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args( self.extensions .iter() .map(|e| format!("--load-extension={}", e)), ); if let Some(ref user_data) = self.user_data_dir { cmd.arg(format!("--user-data-dir={}", user_data.display())); } if let Some((width, height)) = self.window_size { cmd.arg(format!("--window-size={},{}", width, height)); } if !self.sandbox { cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]); } if self.headless { cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]); } if self.incognito { cmd.arg("--incognito"); } if let Some(ref envs) = self.process_envs { cmd.envs(envs); } cmd.stderr(Stdio::piped()).spawn() } } /// Returns the path to Chrome's executable. /// /// If the `CHROME` environment variable is set, `default_executable` will /// use it as the default path. Otherwise, the filenames `google-chrome-stable` /// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are /// searched for in standard places. If that fails, /// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on /// Windows) is consulted. If all of the above fail, an error is returned. 
pub fn default_executable() -> Result<std::path::PathBuf, String> { if let Ok(path) = std::env::var("CHROME") { if std::path::Path::new(&path).exists() { return Ok(path.into()); } } for app in &[ "google-chrome-stable", "chromium", "chromium-browser", "chrome", "chrome-browser", ] { if let Ok(path) = which::which(app) { return Ok(path); } } #[cfg(target_os = "macos")] { let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..]; for path in default_paths { if std::path::Path::new(path).exists() { return Ok(path.into()); } } } #[cfg(windows)] { if let Some(path) = get_chrome_path_from_windows_registry() { if path.exists() { return Ok(path); } } } Err("Could not auto detect a chrome executable".to_string()) } #[cfg(windows)] pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> { winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE) .open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe") .and_then(|key| key.get_value::<String, _>("")) .map(std::path::PathBuf::from) .ok() } /// These are passed to the Chrome binary by default. /// Via https://github.com/puppeteer/puppeteer/blob/4846b8723cf20d3551c0d755df394cc5e0c82a94/src/node/Launcher.ts#L157 static DEFAULT_ARGS: [&str; 23] = [ "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", ];
{ return ws.trim().to_string(); }
conditional_block
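The `browser.rs` entry above documents `Browser::launch` returning a `(Browser, Handler)` pair, plus `new_page`, `user_agent`, and `pages`. A minimal usage sketch, assuming the `tokio-runtime` feature, that `Handler` implements `futures::Stream`, and that a plain URL string converts into `CreateTargetParams`:

```rust
// Minimal sketch of the launch flow shown above (assumptions noted in the lead-in).
use chromiumoxide::browser::{Browser, BrowserConfig};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = BrowserConfig::builder().build()?;
    let (browser, mut handler) = Browser::launch(config).await?;

    // The handler must be polled continuously, otherwise no CDP message
    // (including the response to `new_page`) is ever processed.
    let driver = tokio::spawn(async move {
        while let Some(event) = handler.next().await {
            if event.is_err() {
                break;
            }
        }
    });

    // `new_page` accepts anything convertible into `CreateTargetParams`;
    // a plain URL string is assumed to convert here.
    let _page = browser.new_page("https://example.com").await?;
    println!("user agent: {}", browser.user_agent().await?);
    println!("open pages: {}", browser.pages().await?.len());

    // Dropping the Browser kills the spawned chromium child process.
    drop(browser);
    driver.await?;
    Ok(())
}
```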
browser.rs
use std::time::Duration; use std::{ collections::HashMap, io::{self, BufRead, BufReader}, path::{Path, PathBuf}, process::{self, Child, Stdio}, }; use futures::channel::mpsc::{channel, unbounded, Sender}; use futures::channel::oneshot::channel as oneshot_channel; use futures::SinkExt; use chromiumoxide_cdp::cdp::browser_protocol::target::{ CreateBrowserContextParams, CreateTargetParams, DisposeBrowserContextParams, TargetId, }; use chromiumoxide_cdp::cdp::{CdpEventMessage, IntoEventKind}; use chromiumoxide_types::*; use crate::cmd::{to_command_response, CommandMessage}; use crate::conn::Connection; use crate::error::{CdpError, Result}; use crate::handler::browser::BrowserContext; use crate::handler::viewport::Viewport; use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT}; use crate::listeners::{EventListenerRequest, EventStream}; use crate::page::Page; use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns}; /// A [`Browser`] is created when chromiumoxide connects to a Chromium instance. #[derive(Debug)] pub struct Browser { /// The `Sender` to send messages to the connection handler that drives the /// websocket sender: Sender<HandlerMessage>, /// How the spawned chromium instance was configured, if any config: Option<BrowserConfig>, /// The spawned chromium instance child: Option<Child>, /// The debug web socket url of the chromium instance debug_ws_url: String, /// The context of the browser browser_context: BrowserContext, } impl Browser { /// Connect to an already running chromium instance via websocket pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> { let debug_ws_url = debug_ws_url.into(); let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let fut = Handler::new(conn, rx, HandlerConfig::default()); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: None, child: None, debug_ws_url, browser_context, }; Ok((browser, fut)) } /// Launches a new instance of `chromium` in the background and attaches to /// its debug web socket. /// /// This fails when no chromium executable could be detected. /// /// This fails if no web socket url could be detected from the child /// processes stderr for more than 20 seconds. pub async fn launch(config: BrowserConfig) -> Result<(Self, Handler)> { // launch a new chromium instance let mut child = config.launch()?; // extract the ws: let get_ws_url = ws_url_from_output(&mut child); let dur = Duration::from_secs(20); cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { let debug_ws_url = async_std::future::timeout(dur, get_ws_url) .await .map_err(|_| CdpError::Timeout)?; } else if #[cfg(feature = "tokio-runtime")] { let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await .map_err(|_| CdpError::Timeout)?; } } let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let handler_config = HandlerConfig { ignore_https_errors: config.ignore_https_errors, viewport: Some(config.viewport.clone()), context_ids: Vec::new(), request_timeout: config.request_timeout, }; let fut = Handler::new(conn, rx, handler_config); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: Some(config), child: Some(child), debug_ws_url, browser_context, }; Ok((browser, fut)) } /// If not launched as incognito this creates a new incognito browser /// context. 
After that this browser exists within the incognito session. /// New pages created while being in incognito mode will also run in the /// incognito context. Incognito contexts won't share cookies/cache with /// other browser contexts. pub async fn start_incognito_context(&mut self) -> Result<&mut Self> { if !self.is_incognito_configured() { let resp = self .execute(CreateBrowserContextParams::default()) .await? .result; self.browser_context = BrowserContext::from(resp.browser_context_id); self.sender .clone() .send(HandlerMessage::InsertContext(self.browser_context.clone())) .await?; } Ok(self) } /// If a incognito session was created with /// `Browser::start_incognito_context` this disposes this context. /// /// # Note This will also dispose all pages that were running within the /// incognito context. pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> { if let Some(id) = self.browser_context.take() { self.execute(DisposeBrowserContextParams::new(id.clone())) .await?; self.sender .clone() .send(HandlerMessage::DisposeContext(BrowserContext::from(id))) .await?; } Ok(self) } /// Whether incognito mode was configured from the start fn is_incognito_configured(&self) -> bool { self.config .as_ref() .map(|c| c.incognito) .unwrap_or_default() } /// Returns the address of the websocket this browser is attached to pub fn websocket_address(&self) -> &String { &self.debug_ws_url } /// Whether the BrowserContext is incognito. pub fn is_incognito(&self) -> bool { self.is_incognito_configured() || self.browser_context.is_incognito() } /// The config of the spawned chromium instance if any. pub fn config(&self) -> Option<&BrowserConfig> { self.config.as_ref() } /// Create a new browser page pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> { let (tx, rx) = oneshot_channel(); let mut params = params.into(); if let Some(id) = self.browser_context.id() { if params.browser_context_id.is_none() { params.browser_context_id = Some(id.clone()); } } self.sender .clone() .send(HandlerMessage::CreatePage(params, tx)) .await?; rx.await? } /// Version information about the browser pub async fn version(&self) -> Result<GetVersionReturns> { Ok(self.execute(GetVersionParams::default()).await?.result) } /// Returns the user agent of the browser pub async fn user_agent(&self) -> Result<String> { Ok(self.version().await?.user_agent) } /// Call a browser method. pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> { let (tx, rx) = oneshot_channel(); let method = cmd.identifier(); let msg = CommandMessage::new(cmd, tx)?; self.sender .clone() .send(HandlerMessage::Command(msg)) .await?; let resp = rx.await??; to_command_response::<T>(resp, method) } /// Return all of the pages of the browser pub async fn pages(&self) -> Result<Vec<Page>> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPages(tx)) .await?; Ok(rx.await?) 
} /// Return page of given target_id pub async fn get_page(&self, target_id: TargetId) -> Result<Page> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPage(target_id, tx)) .await?; rx.await?.ok_or(CdpError::NotFound) } //Set listener for browser event pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> { let (tx, rx) = unbounded(); self.sender .clone() .send(HandlerMessage::AddEventListener( EventListenerRequest::new::<T>(tx), )) .await?; Ok(EventStream::new(rx)) } } impl Drop for Browser { fn drop(&mut self) { if let Some(child) = self.child.as_mut() { child.kill().expect("!kill"); } } } async fn ws_url_from_output(child_process: &mut Child) -> String { let stdout = child_process.stderr.take().expect("no stderror"); fn read_debug_url(stdout: std::process::ChildStderr) -> String { let mut buf = BufReader::new(stdout); let mut line = String::new(); loop { if buf.read_line(&mut line).is_ok() { // check for ws in line if let Some(ws) = line.rsplit("listening on ").next() { if ws.starts_with("ws") && ws.contains("devtools/browser") { return ws.trim().to_string(); } } } else { line = String::new(); } } } cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { async_std::task::spawn_blocking(|| read_debug_url(stdout)).await } else if #[cfg(feature = "tokio-runtime")] { tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output") } } } #[derive(Debug, Clone)] pub struct BrowserConfig { /// Determines whether to run headless version of the browser. Defaults to /// true. headless: bool, /// Determines whether to run the browser with a sandbox. sandbox: bool, /// Launch the browser with a specific window width and height. window_size: Option<(u32, u32)>, /// Launch the browser with a specific debugging port. port: u16, /// Path for Chrome or Chromium. /// /// If unspecified, the create will try to automatically detect a suitable /// binary. executable: std::path::PathBuf, /// A list of Chrome extensions to load. /// /// An extension should be a path to a folder containing the extension code. /// CRX files cannot be used directly and must be first extracted. /// /// Note that Chrome does not support loading extensions in headless-mode. /// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5 extensions: Vec<String>, /// Environment variables to set for the Chromium process. /// Passes value through to std::process::Command::envs. pub process_envs: Option<HashMap<String, String>>, /// Data dir for user data pub user_data_dir: Option<PathBuf>, /// Whether to launch the `Browser` in incognito mode incognito: bool, /// Ignore https errors, default is true ignore_https_errors: bool, viewport: Viewport, /// The duration after a request with no response should time out request_timeout: Duration, /// Additional command line arguments to pass to the browser instance. 
args: Vec<String>, } #[derive(Debug, Clone)] pub struct BrowserConfigBuilder { headless: bool, sandbox: bool, window_size: Option<(u32, u32)>, port: u16, executable: Option<PathBuf>, extensions: Vec<String>, process_envs: Option<HashMap<String, String>>, user_data_dir: Option<PathBuf>, incognito: bool, ignore_https_errors: bool, viewport: Viewport, request_timeout: Duration, args: Vec<String>, } impl BrowserConfig { pub fn builder() -> BrowserConfigBuilder { BrowserConfigBuilder::default() } pub fn with_executable(path: impl AsRef<Path>) -> Self { Self::builder().chrome_executable(path).build().unwrap() } } impl Default for BrowserConfigBuilder { fn default() -> Self { Self { headless: true, sandbox: true, window_size: None, port: 0, executable: None, extensions: Vec::new(), process_envs: None, user_data_dir: None, incognito: false, ignore_https_errors: true, viewport: Default::default(), request_timeout: Duration::from_millis(REQUEST_TIMEOUT), args: Vec::new(), } } } impl BrowserConfigBuilder { pub fn window_size(mut self, width: u32, height: u32) -> Self { self.window_size = Some((width, height)); self } pub fn no_sandbox(mut self) -> Self { self.sandbox = false; self } pub fn with_head(mut self) -> Self { self.headless = false; self } pub fn incognito(mut self) -> Self { self.incognito = true; self } pub fn respect_https_errors(mut self) -> Self { self.ignore_https_errors = false; self } pub fn request_timeout(mut self, timeout: Duration) -> Self { self.request_timeout = timeout; self } pub fn viewport(mut self, viewport: Viewport) -> Self { self.viewport = viewport; self } pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self { self.user_data_dir = Some(data_dir.as_ref().to_path_buf()); self } pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self { self.executable = Some(path.as_ref().to_path_buf()); self } pub fn extension(mut self, extension: impl Into<String>) -> Self { self.extensions.push(extension.into()); self } pub fn extensions<I, S>(mut self, extensions: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for ext in extensions { self.extensions.push(ext.into()); } self } pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self { self.process_envs .get_or_insert(HashMap::new()) .insert(key.into(), val.into()); self } pub fn envs<I, K, V>(mut self, envs: I) -> Self where I: IntoIterator<Item = (K, V)>, K: Into<String>, V: Into<String>, { self.process_envs .get_or_insert(HashMap::new()) .extend(envs.into_iter().map(|(k, v)| (k.into(), v.into()))); self } pub fn arg(mut self, arg: impl Into<String>) -> Self { self.args.push(arg.into()); self } pub fn args<I, S>(mut self, args: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for arg in args { self.args.push(arg.into()); } self } pub fn build(self) -> std::result::Result<BrowserConfig, String> { let executable = if let Some(e) = self.executable { e } else { default_executable()? 
}; Ok(BrowserConfig { headless: self.headless, sandbox: self.sandbox, window_size: self.window_size, port: self.port, executable, extensions: self.extensions, process_envs: self.process_envs, user_data_dir: self.user_data_dir, incognito: self.incognito, ignore_https_errors: self.ignore_https_errors, viewport: self.viewport, request_timeout: self.request_timeout, args: self.args, }) } } impl BrowserConfig { pub fn launch(&self) -> io::Result<Child> { let dbg_port = format!("--remote-debugging-port={}", self.port); let args = [ dbg_port.as_str(), "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", "--enable-blink-features=IdleDetection", ]; let mut cmd = process::Command::new(&self.executable); cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args( self.extensions .iter() .map(|e| format!("--load-extension={}", e)), ); if let Some(ref user_data) = self.user_data_dir { cmd.arg(format!("--user-data-dir={}", user_data.display())); } if let Some((width, height)) = self.window_size { cmd.arg(format!("--window-size={},{}", width, height)); } if !self.sandbox { cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]); } if self.headless { cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]); } if self.incognito { cmd.arg("--incognito"); } if let Some(ref envs) = self.process_envs { cmd.envs(envs); } cmd.stderr(Stdio::piped()).spawn() } } /// Returns the path to Chrome's executable. /// /// If the `CHROME` environment variable is set, `default_executable` will /// use it as the default path. Otherwise, the filenames `google-chrome-stable` /// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are /// searched for in standard places. If that fails, /// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on /// Windows) is consulted. If all of the above fail, an error is returned. pub fn default_executable() -> Result<std::path::PathBuf, String>
#[cfg(windows)] pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> { winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE) .open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe") .and_then(|key| key.get_value::<String, _>("")) .map(std::path::PathBuf::from) .ok() } /// These are passed to the Chrome binary by default. /// Via https://github.com/puppeteer/puppeteer/blob/4846b8723cf20d3551c0d755df394cc5e0c82a94/src/node/Launcher.ts#L157 static DEFAULT_ARGS: [&str; 23] = [ "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", ];
{
    if let Ok(path) = std::env::var("CHROME") {
        if std::path::Path::new(&path).exists() {
            return Ok(path.into());
        }
    }
    for app in &[
        "google-chrome-stable",
        "chromium",
        "chromium-browser",
        "chrome",
        "chrome-browser",
    ] {
        if let Ok(path) = which::which(app) {
            return Ok(path);
        }
    }
    #[cfg(target_os = "macos")]
    {
        let default_paths =
            &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..];
        for path in default_paths {
            if std::path::Path::new(path).exists() {
                return Ok(path.into());
            }
        }
    }
    #[cfg(windows)]
    {
        if let Some(path) = get_chrome_path_from_windows_registry() {
            if path.exists() {
                return Ok(path);
            }
        }
    }
    Err("Could not auto detect a chrome executable".to_string())
}
identifier_body
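The masked body above is `default_executable`, which resolves a Chrome/Chromium binary via the `CHROME` env var, well-known names on `$PATH`, and platform defaults; `BrowserConfigBuilder::build` only falls back to it when no executable was set. A small sketch of both paths — the binary location is an assumption used for illustration:

```rust
// Sketch: pin the browser binary vs. rely on auto-detection.
use chromiumoxide::browser::BrowserConfig;

fn pinned_config() -> Result<BrowserConfig, String> {
    // Explicit path: `chrome_executable` (or `BrowserConfig::with_executable`)
    // skips `default_executable` entirely. The path is an assumed example.
    BrowserConfig::builder()
        .chrome_executable("/usr/bin/chromium")
        .build()
}

fn detected_config() -> Result<BrowserConfig, String> {
    // No path given: `build()` falls back to `default_executable`, which
    // checks the CHROME env var first, then well-known names on $PATH,
    // then platform defaults (macOS app bundle, Windows registry).
    BrowserConfig::builder().build()
}
```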
browser.rs
use std::time::Duration; use std::{ collections::HashMap, io::{self, BufRead, BufReader}, path::{Path, PathBuf}, process::{self, Child, Stdio}, }; use futures::channel::mpsc::{channel, unbounded, Sender}; use futures::channel::oneshot::channel as oneshot_channel; use futures::SinkExt; use chromiumoxide_cdp::cdp::browser_protocol::target::{ CreateBrowserContextParams, CreateTargetParams, DisposeBrowserContextParams, TargetId, }; use chromiumoxide_cdp::cdp::{CdpEventMessage, IntoEventKind}; use chromiumoxide_types::*; use crate::cmd::{to_command_response, CommandMessage}; use crate::conn::Connection; use crate::error::{CdpError, Result}; use crate::handler::browser::BrowserContext; use crate::handler::viewport::Viewport; use crate::handler::{Handler, HandlerConfig, HandlerMessage, REQUEST_TIMEOUT}; use crate::listeners::{EventListenerRequest, EventStream}; use crate::page::Page; use chromiumoxide_cdp::cdp::browser_protocol::browser::{GetVersionParams, GetVersionReturns}; /// A [`Browser`] is created when chromiumoxide connects to a Chromium instance. #[derive(Debug)] pub struct Browser { /// The `Sender` to send messages to the connection handler that drives the /// websocket sender: Sender<HandlerMessage>, /// How the spawned chromium instance was configured, if any config: Option<BrowserConfig>, /// The spawned chromium instance child: Option<Child>, /// The debug web socket url of the chromium instance debug_ws_url: String, /// The context of the browser browser_context: BrowserContext, } impl Browser { /// Connect to an already running chromium instance via websocket pub async fn connect(debug_ws_url: impl Into<String>) -> Result<(Self, Handler)> { let debug_ws_url = debug_ws_url.into(); let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let fut = Handler::new(conn, rx, HandlerConfig::default()); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: None, child: None, debug_ws_url, browser_context, }; Ok((browser, fut)) } /// Launches a new instance of `chromium` in the background and attaches to /// its debug web socket. /// /// This fails when no chromium executable could be detected. /// /// This fails if no web socket url could be detected from the child /// processes stderr for more than 20 seconds. pub async fn
(config: BrowserConfig) -> Result<(Self, Handler)> { // launch a new chromium instance let mut child = config.launch()?; // extract the ws: let get_ws_url = ws_url_from_output(&mut child); let dur = Duration::from_secs(20); cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { let debug_ws_url = async_std::future::timeout(dur, get_ws_url) .await .map_err(|_| CdpError::Timeout)?; } else if #[cfg(feature = "tokio-runtime")] { let debug_ws_url = tokio::time::timeout(dur, get_ws_url).await .map_err(|_| CdpError::Timeout)?; } } let conn = Connection::<CdpEventMessage>::connect(&debug_ws_url).await?; let (tx, rx) = channel(1); let handler_config = HandlerConfig { ignore_https_errors: config.ignore_https_errors, viewport: Some(config.viewport.clone()), context_ids: Vec::new(), request_timeout: config.request_timeout, }; let fut = Handler::new(conn, rx, handler_config); let browser_context = fut.default_browser_context().clone(); let browser = Self { sender: tx, config: Some(config), child: Some(child), debug_ws_url, browser_context, }; Ok((browser, fut)) } /// If not launched as incognito this creates a new incognito browser /// context. After that this browser exists within the incognito session. /// New pages created while being in incognito mode will also run in the /// incognito context. Incognito contexts won't share cookies/cache with /// other browser contexts. pub async fn start_incognito_context(&mut self) -> Result<&mut Self> { if !self.is_incognito_configured() { let resp = self .execute(CreateBrowserContextParams::default()) .await? .result; self.browser_context = BrowserContext::from(resp.browser_context_id); self.sender .clone() .send(HandlerMessage::InsertContext(self.browser_context.clone())) .await?; } Ok(self) } /// If a incognito session was created with /// `Browser::start_incognito_context` this disposes this context. /// /// # Note This will also dispose all pages that were running within the /// incognito context. pub async fn quit_incognito_context(&mut self) -> Result<&mut Self> { if let Some(id) = self.browser_context.take() { self.execute(DisposeBrowserContextParams::new(id.clone())) .await?; self.sender .clone() .send(HandlerMessage::DisposeContext(BrowserContext::from(id))) .await?; } Ok(self) } /// Whether incognito mode was configured from the start fn is_incognito_configured(&self) -> bool { self.config .as_ref() .map(|c| c.incognito) .unwrap_or_default() } /// Returns the address of the websocket this browser is attached to pub fn websocket_address(&self) -> &String { &self.debug_ws_url } /// Whether the BrowserContext is incognito. pub fn is_incognito(&self) -> bool { self.is_incognito_configured() || self.browser_context.is_incognito() } /// The config of the spawned chromium instance if any. pub fn config(&self) -> Option<&BrowserConfig> { self.config.as_ref() } /// Create a new browser page pub async fn new_page(&self, params: impl Into<CreateTargetParams>) -> Result<Page> { let (tx, rx) = oneshot_channel(); let mut params = params.into(); if let Some(id) = self.browser_context.id() { if params.browser_context_id.is_none() { params.browser_context_id = Some(id.clone()); } } self.sender .clone() .send(HandlerMessage::CreatePage(params, tx)) .await?; rx.await? 
} /// Version information about the browser pub async fn version(&self) -> Result<GetVersionReturns> { Ok(self.execute(GetVersionParams::default()).await?.result) } /// Returns the user agent of the browser pub async fn user_agent(&self) -> Result<String> { Ok(self.version().await?.user_agent) } /// Call a browser method. pub async fn execute<T: Command>(&self, cmd: T) -> Result<CommandResponse<T::Response>> { let (tx, rx) = oneshot_channel(); let method = cmd.identifier(); let msg = CommandMessage::new(cmd, tx)?; self.sender .clone() .send(HandlerMessage::Command(msg)) .await?; let resp = rx.await??; to_command_response::<T>(resp, method) } /// Return all of the pages of the browser pub async fn pages(&self) -> Result<Vec<Page>> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPages(tx)) .await?; Ok(rx.await?) } /// Return page of given target_id pub async fn get_page(&self, target_id: TargetId) -> Result<Page> { let (tx, rx) = oneshot_channel(); self.sender .clone() .send(HandlerMessage::GetPage(target_id, tx)) .await?; rx.await?.ok_or(CdpError::NotFound) } //Set listener for browser event pub async fn event_listener<T: IntoEventKind>(&self) -> Result<EventStream<T>> { let (tx, rx) = unbounded(); self.sender .clone() .send(HandlerMessage::AddEventListener( EventListenerRequest::new::<T>(tx), )) .await?; Ok(EventStream::new(rx)) } } impl Drop for Browser { fn drop(&mut self) { if let Some(child) = self.child.as_mut() { child.kill().expect("!kill"); } } } async fn ws_url_from_output(child_process: &mut Child) -> String { let stdout = child_process.stderr.take().expect("no stderror"); fn read_debug_url(stdout: std::process::ChildStderr) -> String { let mut buf = BufReader::new(stdout); let mut line = String::new(); loop { if buf.read_line(&mut line).is_ok() { // check for ws in line if let Some(ws) = line.rsplit("listening on ").next() { if ws.starts_with("ws") && ws.contains("devtools/browser") { return ws.trim().to_string(); } } } else { line = String::new(); } } } cfg_if::cfg_if! { if #[cfg(feature = "async-std-runtime")] { async_std::task::spawn_blocking(|| read_debug_url(stdout)).await } else if #[cfg(feature = "tokio-runtime")] { tokio::task::spawn_blocking(move || read_debug_url(stdout)).await.expect("Failed to read debug url from process output") } } } #[derive(Debug, Clone)] pub struct BrowserConfig { /// Determines whether to run headless version of the browser. Defaults to /// true. headless: bool, /// Determines whether to run the browser with a sandbox. sandbox: bool, /// Launch the browser with a specific window width and height. window_size: Option<(u32, u32)>, /// Launch the browser with a specific debugging port. port: u16, /// Path for Chrome or Chromium. /// /// If unspecified, the create will try to automatically detect a suitable /// binary. executable: std::path::PathBuf, /// A list of Chrome extensions to load. /// /// An extension should be a path to a folder containing the extension code. /// CRX files cannot be used directly and must be first extracted. /// /// Note that Chrome does not support loading extensions in headless-mode. /// See https://bugs.chromium.org/p/chromium/issues/detail?id=706008#c5 extensions: Vec<String>, /// Environment variables to set for the Chromium process. /// Passes value through to std::process::Command::envs. 
pub process_envs: Option<HashMap<String, String>>, /// Data dir for user data pub user_data_dir: Option<PathBuf>, /// Whether to launch the `Browser` in incognito mode incognito: bool, /// Ignore https errors, default is true ignore_https_errors: bool, viewport: Viewport, /// The duration after a request with no response should time out request_timeout: Duration, /// Additional command line arguments to pass to the browser instance. args: Vec<String>, } #[derive(Debug, Clone)] pub struct BrowserConfigBuilder { headless: bool, sandbox: bool, window_size: Option<(u32, u32)>, port: u16, executable: Option<PathBuf>, extensions: Vec<String>, process_envs: Option<HashMap<String, String>>, user_data_dir: Option<PathBuf>, incognito: bool, ignore_https_errors: bool, viewport: Viewport, request_timeout: Duration, args: Vec<String>, } impl BrowserConfig { pub fn builder() -> BrowserConfigBuilder { BrowserConfigBuilder::default() } pub fn with_executable(path: impl AsRef<Path>) -> Self { Self::builder().chrome_executable(path).build().unwrap() } } impl Default for BrowserConfigBuilder { fn default() -> Self { Self { headless: true, sandbox: true, window_size: None, port: 0, executable: None, extensions: Vec::new(), process_envs: None, user_data_dir: None, incognito: false, ignore_https_errors: true, viewport: Default::default(), request_timeout: Duration::from_millis(REQUEST_TIMEOUT), args: Vec::new(), } } } impl BrowserConfigBuilder { pub fn window_size(mut self, width: u32, height: u32) -> Self { self.window_size = Some((width, height)); self } pub fn no_sandbox(mut self) -> Self { self.sandbox = false; self } pub fn with_head(mut self) -> Self { self.headless = false; self } pub fn incognito(mut self) -> Self { self.incognito = true; self } pub fn respect_https_errors(mut self) -> Self { self.ignore_https_errors = false; self } pub fn request_timeout(mut self, timeout: Duration) -> Self { self.request_timeout = timeout; self } pub fn viewport(mut self, viewport: Viewport) -> Self { self.viewport = viewport; self } pub fn user_data_dir(mut self, data_dir: impl AsRef<Path>) -> Self { self.user_data_dir = Some(data_dir.as_ref().to_path_buf()); self } pub fn chrome_executable(mut self, path: impl AsRef<Path>) -> Self { self.executable = Some(path.as_ref().to_path_buf()); self } pub fn extension(mut self, extension: impl Into<String>) -> Self { self.extensions.push(extension.into()); self } pub fn extensions<I, S>(mut self, extensions: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for ext in extensions { self.extensions.push(ext.into()); } self } pub fn env(mut self, key: impl Into<String>, val: impl Into<String>) -> Self { self.process_envs .get_or_insert(HashMap::new()) .insert(key.into(), val.into()); self } pub fn envs<I, K, V>(mut self, envs: I) -> Self where I: IntoIterator<Item = (K, V)>, K: Into<String>, V: Into<String>, { self.process_envs .get_or_insert(HashMap::new()) .extend(envs.into_iter().map(|(k, v)| (k.into(), v.into()))); self } pub fn arg(mut self, arg: impl Into<String>) -> Self { self.args.push(arg.into()); self } pub fn args<I, S>(mut self, args: I) -> Self where I: IntoIterator<Item = S>, S: Into<String>, { for arg in args { self.args.push(arg.into()); } self } pub fn build(self) -> std::result::Result<BrowserConfig, String> { let executable = if let Some(e) = self.executable { e } else { default_executable()? 
}; Ok(BrowserConfig { headless: self.headless, sandbox: self.sandbox, window_size: self.window_size, port: self.port, executable, extensions: self.extensions, process_envs: self.process_envs, user_data_dir: self.user_data_dir, incognito: self.incognito, ignore_https_errors: self.ignore_https_errors, viewport: self.viewport, request_timeout: self.request_timeout, args: self.args, }) } } impl BrowserConfig { pub fn launch(&self) -> io::Result<Child> { let dbg_port = format!("--remote-debugging-port={}", self.port); let args = [ dbg_port.as_str(), "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", "--enable-blink-features=IdleDetection", ]; let mut cmd = process::Command::new(&self.executable); cmd.args(&args).args(&DEFAULT_ARGS).args(&self.args).args( self.extensions .iter() .map(|e| format!("--load-extension={}", e)), ); if let Some(ref user_data) = self.user_data_dir { cmd.arg(format!("--user-data-dir={}", user_data.display())); } if let Some((width, height)) = self.window_size { cmd.arg(format!("--window-size={},{}", width, height)); } if !self.sandbox { cmd.args(&["--no-sandbox", "--disable-setuid-sandbox"]); } if self.headless { cmd.args(&["--headless", "--hide-scrollbars", "--mute-audio"]); } if self.incognito { cmd.arg("--incognito"); } if let Some(ref envs) = self.process_envs { cmd.envs(envs); } cmd.stderr(Stdio::piped()).spawn() } } /// Returns the path to Chrome's executable. /// /// If the `CHROME` environment variable is set, `default_executable` will /// use it as the default path. Otherwise, the filenames `google-chrome-stable` /// `chromium`, `chromium-browser`, `chrome` and `chrome-browser` are /// searched for in standard places. If that fails, /// `/Applications/Google Chrome.app/...` (on MacOS) or the registry (on /// Windows) is consulted. If all of the above fail, an error is returned. 
pub fn default_executable() -> Result<std::path::PathBuf, String> { if let Ok(path) = std::env::var("CHROME") { if std::path::Path::new(&path).exists() { return Ok(path.into()); } } for app in &[ "google-chrome-stable", "chromium", "chromium-browser", "chrome", "chrome-browser", ] { if let Ok(path) = which::which(app) { return Ok(path); } } #[cfg(target_os = "macos")] { let default_paths = &["/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"][..]; for path in default_paths { if std::path::Path::new(path).exists() { return Ok(path.into()); } } } #[cfg(windows)] { if let Some(path) = get_chrome_path_from_windows_registry() { if path.exists() { return Ok(path); } } } Err("Could not auto detect a chrome executable".to_string()) } #[cfg(windows)] pub(crate) fn get_chrome_path_from_windows_registry() -> Option<std::path::PathBuf> { winreg::RegKey::predef(winreg::enums::HKEY_LOCAL_MACHINE) .open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe") .and_then(|key| key.get_value::<String, _>("")) .map(std::path::PathBuf::from) .ok() } /// These are passed to the Chrome binary by default. /// Via https://github.com/puppeteer/puppeteer/blob/4846b8723cf20d3551c0d755df394cc5e0c82a94/src/node/Launcher.ts#L157 static DEFAULT_ARGS: [&str; 23] = [ "--disable-background-networking", "--enable-features=NetworkService,NetworkServiceInProcess", "--disable-background-timer-throttling", "--disable-backgrounding-occluded-windows", "--disable-breakpad", "--disable-client-side-phishing-detection", "--disable-component-extensions-with-background-pages", "--disable-default-apps", "--disable-dev-shm-usage", "--disable-extensions", "--disable-features=TranslateUI", "--disable-hang-monitor", "--disable-ipc-flooding-protection", "--disable-popup-blocking", "--disable-prompt-on-repost", "--disable-renderer-backgrounding", "--disable-sync", "--force-color-profile=srgb", "--metrics-recording-only", "--no-first-run", "--enable-automation", "--password-store=basic", "--use-mock-keychain", ];
launch
identifier_name
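Besides `launch` (the masked identifier above), the surrounding file exposes incognito contexts and typed CDP event listeners. A hedged sketch combining `start_incognito_context`, `event_listener`, and `quit_incognito_context`; the event type `EventTargetCreated` and its module path are assumptions about the generated bindings, and a tokio runtime is assumed:

```rust
// Sketch: isolate pages in an incognito context and watch target events.
// `EventTargetCreated` is assumed to be the generated type for the CDP
// `Target.targetCreated` event; adjust the import if the bindings differ.
use chromiumoxide::browser::Browser;
use chromiumoxide_cdp::cdp::browser_protocol::target::EventTargetCreated;
use futures::StreamExt;

async fn incognito_session(mut browser: Browser) -> Result<(), Box<dyn std::error::Error>> {
    // Pages created after this call live in the incognito context and do not
    // share cookies/cache with other contexts.
    browser.start_incognito_context().await?;

    // Typed stream of `Target.targetCreated` notifications.
    let mut targets = browser.event_listener::<EventTargetCreated>().await?;
    let watcher = tokio::spawn(async move {
        while let Some(event) = targets.next().await {
            println!("target created: {event:?}");
        }
    });

    let _page = browser.new_page("https://example.com").await?;

    // Disposes the context and every page running inside it.
    browser.quit_incognito_context().await?;
    watcher.abort();
    Ok(())
}
```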
pybatch_mast.py
"""Main module.""" from typing import Optional, Generator, Tuple, List, Sequence, Dict, Any, Union from anndata import AnnData from pandas import DataFrame import boto3 as bt from botocore.exceptions import ClientError import os import pandas as pd import scanpy as sc import tempfile import time import uuid class MASTCollectionError(Exception): def __init__( self, message: str, job_collection, ): # Call the base class constructor with the parameters it needs super().__init__(message) self.jc = job_collection class BatchMAST(): def __init__( self, job_queue: str, job_def: str, bucket: str, layer: str = 'counts', ): self.job_queue = job_queue self.job_def = job_def self.bucket = bucket self.layer = layer def mast( self, adata: AnnData, keys: Sequence[str], group: str, fdr: float, lfc: float, covs: str = '', bys: Optional[Sequence[Tuple[str, Sequence[str]]]] = None, min_perc: Optional[Union[float, Dict[str, float]]] = None, on_total: Optional[bool] = False, min_cells_limit: Optional[int] = 3, jobs: int = 1, ) -> Generator[ Tuple[ Dict[str, DataFrame], Dict[str, Dict[str, List[str]]], Optional[str], ], None, None, ]: # NOTE n_genes is always assumed as covariate if bys is None: if min_perc is not None: adata = adata.copy() if on_total: total_cells = adata.shape[0] else: total_cells = adata.obs[group].value_counts().min() min_cells = max(total_cells * min_perc, min_cells_limit) print( f'Filtering genes detected in fewer than {min_cells} cells' ) sc.pp.filter_genes(adata, min_cells=min_cells) enough_genes = adata.shape[1] > 0 job_collection = {} if enough_genes: job_collection = self._mast( job_collection, adata, covs, group, keys, jobs=jobs, ) else: print('Not enough genes, computation skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, None else: for by, groups in bys: job_collection = {} for b in groups: adata_b = adata[adata.obs[by] == b].copy() if min_perc is not None: if on_total: total_cells = adata_b.shape[0] else: total_cells = adata_b.obs[group].value_counts( ).min() min_cells = max( total_cells * min_perc[b], min_cells_limit ) print( 'Filtering genes detected in fewer ' f'than {min_cells} cells' ) sc.pp.filter_genes( adata_b, min_cells=min_cells, ) enough_groups = ( adata_b.obs[group].value_counts() >= 3 ).sum() > 1 enough_genes = adata_b.shape[1] > 0 if enough_groups and enough_genes: job_collection = self._mast( job_collection, adata_b, covs, group, keys, by=by, b=b, jobs=jobs, ) else: print(f'Computation for {b} skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, by def _mast( self, job_collection: Dict[str, Dict[str, str]], adata: AnnData, covs: str, group: str, keys: Sequence[str], by: Optional[str] = None, b: Optional[str] = None, jobs: int = 1, ) -> Dict[str, Dict[str, str]]: if by is None: b = 'Sheet0' new_covs = BatchMAST._clean_covs(adata, covs, group, by=by) remote_dir, job_id, job_name, content = self.mast_compute( adata, keys, group=group, covs=new_covs, block=False, jobs=jobs, ) job_collection[job_id] = {'group': b, 'remote_dir': remote_dir} return job_collection def mast_prep_output( self, job_collection: Dict[str, Dict[str, str]], lfc: float, fdr: float, wait: float = 30, ) -> 
Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]: de = {} top = {} for job_id, status, metadata, content in self.mast_collect( job_collection, wait=wait, ): if status == 'SUCCEEDED': b = metadata['group'] de[b] = content elif status == 'FAILED': print(f'Job Failed: group {metadata["group"]}') else: raise NotImplementedError(f'Status {status} not managed') top = BatchMAST.mast_filter(de, lfc, fdr) return de, top @staticmethod def mast_filter( de: Dict[str, DataFrame], lfc: float, fdr: float, ) -> Dict[str, Dict[str, List[str]]]: top = {} for b in de.keys(): cols = [ '_'.join(c.split('_')[:-1]) for c in de[b].columns[de[b].columns.str.endswith('_coef')] ] top[b] = {} for c in cols: top[b][c] = de[b][ (de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc) ].sort_values([ f'{c}_fdr', f'{c}_coef' ], ascending=[True, False]).index.tolist() return top @staticmethod def _clean_covs( adata: AnnData, covs: str, group: str, by: Optional[str] = None, ) -> str: covs_s = covs.split('+')[1:] new_covs = '' for c in covs_s: # Including only covariates with more than 1 level # Otherwise, MAST will stop execution with error: # contrasts can be applied only to factors with 2 or more levels if c not in (group, by) and adata.obs[c].nunique() > 1: new_covs += f'+{c}' return new_covs def
( self, adata: AnnData, remote_dir: str, keys: Sequence[str], group: str, covs: str = '', ready: Optional[Sequence[str]] = None, jobs: int = 1, ) -> str: s3 = bt.resource('s3') if ready is None: ready = [] with tempfile.TemporaryDirectory() as td: if 'mat' not in ready: local_mat = os.path.join(td, 'mat.fth') adata = adata.copy() adata.X = adata.layers[self.layer] sc.pp.normalize_total(adata, target_sum=1e6) sc.pp.log1p(adata, base=2) adata.to_df().reset_index().to_feather( local_mat, compression='uncompressed', ) remote_mat = os.path.join(remote_dir, 'mat.fth') print(f'Uploading matrix ({adata.shape}) to s3...') s3.meta.client.upload_file(local_mat, self.bucket, remote_mat) if 'cdat' not in ready: local_cdat = os.path.join(td, 'cdat.csv') adata.obs[keys].to_csv(local_cdat) remote_cdat = os.path.join(remote_dir, 'cdat.csv') print('Uploading metadata to s3...') s3.meta.client.upload_file( local_cdat, self.bucket, remote_cdat) remote = os.path.join(self.bucket, remote_dir) manifest = '\n'.join([ f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv', 'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv', f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}', ]) local_manifest = os.path.join(td, 'manifest.txt') with open(local_manifest, 'w') as m: m.write(manifest + '\n') remote_manifest = os.path.join(remote_dir, 'manifest.txt') print('Uploading manifest to s3...') s3.meta.client.upload_file( local_manifest, self.bucket, remote_manifest, ) return remote_manifest def _mast_submit( self, manifest: str, block: bool = False, job_name: str = 'mast', ) -> str: batch = bt.client('batch') job_manifest = f's3://{os.path.join(self.bucket, manifest)}' job_id = None try: print( f'Submitting job {job_name} to the job queue {self.job_queue}' ) submit_job_response = batch.submit_job( jobName=job_name, jobQueue=self.job_queue, jobDefinition=self.job_def, containerOverrides={'command': [job_manifest]} ) job_id = submit_job_response['jobId'] print( f'Submitted job {job_name} {job_id} to the job queue' f' {self.job_queue}' ) except Exception as err: print(f'error: {str(err)}') return job_id def mast_compute( self, adata: AnnData, keys: Sequence[str], group: str, covs: str = '', block: bool = False, remote_dir: Optional[str] = None, jobs: int = 1, ) -> Tuple[str, str, str, Optional[DataFrame]]: content = None if remote_dir is None: remote_dir = os.path.join('mast', str(uuid.uuid4())) ready = [] else: ready = ['mat', 'cdat'] manifest = self._mast_prep( adata, remote_dir, keys, group, covs=covs, ready=ready, jobs=jobs, ) job_name = f'mast-{"".join(filter(str.isalnum, group))}-{"".join(filter(str.isalnum, covs))}' job_id = self._mast_submit( manifest, block=block, job_name=job_name, ) if block: status = BatchMAST._batch_job_status(job_id, wait=60) if status == 'SUCCEEDED': content = self._mast_results(remote_dir) return remote_dir, job_id, job_name, content def mast_collect( self, collection: Dict[str, Dict[str, str]], wait: int = 30, ) -> Generator[ Tuple[str, str, Dict[str, str], Optional[DataFrame]], None, None, ]: while wait and len(collection) > 0: time.sleep(wait) for job_id in list(collection.keys()): status = BatchMAST._batch_job_status(job_id) if status == 'SUCCEEDED': remote_dir = collection[job_id]['remote_dir'] content = self._mast_results(remote_dir) yield job_id, status, collection.pop(job_id), content elif status == 'FAILED': yield job_id, status, collection.pop(job_id), None @staticmethod def _batch_job_status( job_id: str, wait: int = 0, verbose: bool = False, ) -> str: batch = 
bt.client('batch') describe_jobs_response = batch.describe_jobs(jobs=[job_id]) status = describe_jobs_response['jobs'][0]['status'] if verbose: print(status) while wait: time.sleep(wait) describe_jobs_response = batch.describe_jobs(jobs=[job_id]) new_status = describe_jobs_response['jobs'][0]['status'] if new_status != status: status = new_status if verbose: print(status) if status == 'SUCCEEDED' or status == 'FAILED': break return status def _mast_results( self, remote_dir: str, ) -> DataFrame: s3 = bt.resource('s3') with tempfile.TemporaryDirectory() as td: remote_out = os.path.join(remote_dir, 'out.csv') local_out = os.path.join(td, 'out.csv') s3.Bucket(self.bucket).download_file(remote_out, local_out) content = pd.read_csv(local_out, index_col=0) return content @staticmethod def mast_to_excel( de: Dict[str, DataFrame], fname: str, top: Optional[Dict[str, Dict[str, List[str]]]] = None, top_prefix: str = None, only_top: bool = False, ): if not only_top: writer = pd.ExcelWriter( f'{fname}.xlsx', engine='xlsxwriter' ) for s in de.keys(): de[s].to_excel(writer, sheet_name=str(s)) writer.save() if top is not None: if top_prefix is None: top_prefix = '' else: top_prefix = f'{top_prefix}.' writer = pd.ExcelWriter( f'{fname}.{top_prefix}top.xlsx', engine='xlsxwriter' ) for s in top.keys(): pd.DataFrame.from_dict( top[s], orient='index', ).T.fillna('').to_excel( writer, sheet_name=str(s), index=False ) writer.save()
_mast_prep
identifier_name
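The record above is an `identifier_name` example: the middle is just the method name `_mast_prep`, the prefix ends right after `def `, and the suffix carries the rest of the file. A minimal sketch of how such a record can be sanity-checked; the field names mirror the dataset schema, and the sample values are abbreviated stand-ins rather than the full cells shown in this dump:

```python
import keyword

def reconstruct(record: dict) -> str:
    """Concatenate the three spans back into the original source text."""
    return record["prefix"] + record["middle"] + record["suffix"]

def is_valid_identifier_middle(record: dict) -> bool:
    """For fim_type == 'identifier_name' the middle should be a bare identifier."""
    mid = record["middle"].strip()
    return mid.isidentifier() and not keyword.iskeyword(mid)

record = {
    "file_name": "pybatch_mast.py",
    "prefix": "    def ",
    "middle": "_mast_prep",
    "suffix": "(\n        self,\n        adata,\n    ) -> str:\n        ...\n",
    "fim_type": "identifier_name",
}

assert is_valid_identifier_middle(record)
print(reconstruct(record))
```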
pybatch_mast.py
"""Main module.""" from typing import Optional, Generator, Tuple, List, Sequence, Dict, Any, Union from anndata import AnnData from pandas import DataFrame import boto3 as bt from botocore.exceptions import ClientError import os import pandas as pd import scanpy as sc import tempfile import time import uuid class MASTCollectionError(Exception): def __init__( self, message: str, job_collection, ): # Call the base class constructor with the parameters it needs super().__init__(message) self.jc = job_collection class BatchMAST(): def __init__( self, job_queue: str, job_def: str, bucket: str, layer: str = 'counts', ): self.job_queue = job_queue self.job_def = job_def self.bucket = bucket self.layer = layer def mast( self, adata: AnnData, keys: Sequence[str], group: str, fdr: float, lfc: float, covs: str = '', bys: Optional[Sequence[Tuple[str, Sequence[str]]]] = None, min_perc: Optional[Union[float, Dict[str, float]]] = None, on_total: Optional[bool] = False, min_cells_limit: Optional[int] = 3, jobs: int = 1, ) -> Generator[ Tuple[ Dict[str, DataFrame], Dict[str, Dict[str, List[str]]], Optional[str], ], None, None, ]: # NOTE n_genes is always assumed as covariate if bys is None: if min_perc is not None: adata = adata.copy() if on_total: total_cells = adata.shape[0] else: total_cells = adata.obs[group].value_counts().min() min_cells = max(total_cells * min_perc, min_cells_limit) print( f'Filtering genes detected in fewer than {min_cells} cells' ) sc.pp.filter_genes(adata, min_cells=min_cells) enough_genes = adata.shape[1] > 0 job_collection = {} if enough_genes: job_collection = self._mast( job_collection, adata, covs, group, keys, jobs=jobs, ) else: print('Not enough genes, computation skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, None else: for by, groups in bys: job_collection = {} for b in groups: adata_b = adata[adata.obs[by] == b].copy() if min_perc is not None: if on_total: total_cells = adata_b.shape[0] else: total_cells = adata_b.obs[group].value_counts( ).min() min_cells = max( total_cells * min_perc[b], min_cells_limit ) print( 'Filtering genes detected in fewer ' f'than {min_cells} cells' ) sc.pp.filter_genes( adata_b, min_cells=min_cells, ) enough_groups = ( adata_b.obs[group].value_counts() >= 3 ).sum() > 1 enough_genes = adata_b.shape[1] > 0 if enough_groups and enough_genes: job_collection = self._mast( job_collection, adata_b, covs, group, keys, by=by, b=b, jobs=jobs, ) else: print(f'Computation for {b} skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, by def _mast( self, job_collection: Dict[str, Dict[str, str]], adata: AnnData, covs: str, group: str, keys: Sequence[str], by: Optional[str] = None, b: Optional[str] = None, jobs: int = 1, ) -> Dict[str, Dict[str, str]]: if by is None: b = 'Sheet0' new_covs = BatchMAST._clean_covs(adata, covs, group, by=by) remote_dir, job_id, job_name, content = self.mast_compute( adata, keys, group=group, covs=new_covs, block=False, jobs=jobs, ) job_collection[job_id] = {'group': b, 'remote_dir': remote_dir} return job_collection def mast_prep_output( self, job_collection: Dict[str, Dict[str, str]], lfc: float, fdr: float, wait: float = 30, ) -> 
Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]: de = {} top = {} for job_id, status, metadata, content in self.mast_collect( job_collection, wait=wait, ): if status == 'SUCCEEDED': b = metadata['group'] de[b] = content elif status == 'FAILED': print(f'Job Failed: group {metadata["group"]}') else: raise NotImplementedError(f'Status {status} not managed') top = BatchMAST.mast_filter(de, lfc, fdr) return de, top @staticmethod def mast_filter( de: Dict[str, DataFrame], lfc: float, fdr: float, ) -> Dict[str, Dict[str, List[str]]]: top = {} for b in de.keys(): cols = [ '_'.join(c.split('_')[:-1]) for c in de[b].columns[de[b].columns.str.endswith('_coef')] ] top[b] = {} for c in cols: top[b][c] = de[b][ (de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc) ].sort_values([ f'{c}_fdr', f'{c}_coef' ], ascending=[True, False]).index.tolist() return top @staticmethod def _clean_covs( adata: AnnData, covs: str, group: str, by: Optional[str] = None, ) -> str: covs_s = covs.split('+')[1:] new_covs = '' for c in covs_s: # Including only covariates with more than 1 level # Otherwise, MAST will stop execution with error: # contrasts can be applied only to factors with 2 or more levels if c not in (group, by) and adata.obs[c].nunique() > 1: new_covs += f'+{c}' return new_covs def _mast_prep( self, adata: AnnData, remote_dir: str, keys: Sequence[str], group: str, covs: str = '', ready: Optional[Sequence[str]] = None, jobs: int = 1, ) -> str: s3 = bt.resource('s3') if ready is None: ready = [] with tempfile.TemporaryDirectory() as td: if 'mat' not in ready: local_mat = os.path.join(td, 'mat.fth') adata = adata.copy() adata.X = adata.layers[self.layer] sc.pp.normalize_total(adata, target_sum=1e6) sc.pp.log1p(adata, base=2) adata.to_df().reset_index().to_feather( local_mat, compression='uncompressed', ) remote_mat = os.path.join(remote_dir, 'mat.fth') print(f'Uploading matrix ({adata.shape}) to s3...') s3.meta.client.upload_file(local_mat, self.bucket, remote_mat) if 'cdat' not in ready: local_cdat = os.path.join(td, 'cdat.csv') adata.obs[keys].to_csv(local_cdat) remote_cdat = os.path.join(remote_dir, 'cdat.csv') print('Uploading metadata to s3...') s3.meta.client.upload_file( local_cdat, self.bucket, remote_cdat) remote = os.path.join(self.bucket, remote_dir) manifest = '\n'.join([ f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv', 'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv', f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}', ]) local_manifest = os.path.join(td, 'manifest.txt') with open(local_manifest, 'w') as m: m.write(manifest + '\n') remote_manifest = os.path.join(remote_dir, 'manifest.txt') print('Uploading manifest to s3...') s3.meta.client.upload_file( local_manifest, self.bucket, remote_manifest, ) return remote_manifest def _mast_submit( self, manifest: str, block: bool = False, job_name: str = 'mast', ) -> str: batch = bt.client('batch') job_manifest = f's3://{os.path.join(self.bucket, manifest)}' job_id = None try: print( f'Submitting job {job_name} to the job queue {self.job_queue}' ) submit_job_response = batch.submit_job( jobName=job_name, jobQueue=self.job_queue, jobDefinition=self.job_def, containerOverrides={'command': [job_manifest]} ) job_id = submit_job_response['jobId'] print( f'Submitted job {job_name} {job_id} to the job queue' f' {self.job_queue}' ) except Exception as err: print(f'error: {str(err)}') return job_id def mast_compute( self, adata: AnnData, keys: Sequence[str], group: str, covs: str = '', block: bool = False, remote_dir: Optional[str] = None, jobs: 
int = 1, ) -> Tuple[str, str, str, Optional[DataFrame]]:
def mast_collect( self, collection: Dict[str, Dict[str, str]], wait: int = 30, ) -> Generator[ Tuple[str, str, Dict[str, str], Optional[DataFrame]], None, None, ]: while wait and len(collection) > 0: time.sleep(wait) for job_id in list(collection.keys()): status = BatchMAST._batch_job_status(job_id) if status == 'SUCCEEDED': remote_dir = collection[job_id]['remote_dir'] content = self._mast_results(remote_dir) yield job_id, status, collection.pop(job_id), content elif status == 'FAILED': yield job_id, status, collection.pop(job_id), None @staticmethod def _batch_job_status( job_id: str, wait: int = 0, verbose: bool = False, ) -> str: batch = bt.client('batch') describe_jobs_response = batch.describe_jobs(jobs=[job_id]) status = describe_jobs_response['jobs'][0]['status'] if verbose: print(status) while wait: time.sleep(wait) describe_jobs_response = batch.describe_jobs(jobs=[job_id]) new_status = describe_jobs_response['jobs'][0]['status'] if new_status != status: status = new_status if verbose: print(status) if status == 'SUCCEEDED' or status == 'FAILED': break return status def _mast_results( self, remote_dir: str, ) -> DataFrame: s3 = bt.resource('s3') with tempfile.TemporaryDirectory() as td: remote_out = os.path.join(remote_dir, 'out.csv') local_out = os.path.join(td, 'out.csv') s3.Bucket(self.bucket).download_file(remote_out, local_out) content = pd.read_csv(local_out, index_col=0) return content @staticmethod def mast_to_excel( de: Dict[str, DataFrame], fname: str, top: Optional[Dict[str, Dict[str, List[str]]]] = None, top_prefix: str = None, only_top: bool = False, ): if not only_top: writer = pd.ExcelWriter( f'{fname}.xlsx', engine='xlsxwriter' ) for s in de.keys(): de[s].to_excel(writer, sheet_name=str(s)) writer.save() if top is not None: if top_prefix is None: top_prefix = '' else: top_prefix = f'{top_prefix}.' writer = pd.ExcelWriter( f'{fname}.{top_prefix}top.xlsx', engine='xlsxwriter' ) for s in top.keys(): pd.DataFrame.from_dict( top[s], orient='index', ).T.fillna('').to_excel( writer, sheet_name=str(s), index=False ) writer.save()
        content = None
        if remote_dir is None:
            remote_dir = os.path.join('mast', str(uuid.uuid4()))
            ready = []
        else:
            ready = ['mat', 'cdat']
        manifest = self._mast_prep(
            adata,
            remote_dir,
            keys,
            group,
            covs=covs,
            ready=ready,
            jobs=jobs,
        )
        job_name = f'mast-{"".join(filter(str.isalnum, group))}-{"".join(filter(str.isalnum, covs))}'
        job_id = self._mast_submit(
            manifest,
            block=block,
            job_name=job_name,
        )
        if block:
            status = BatchMAST._batch_job_status(job_id, wait=60)
            if status == 'SUCCEEDED':
                content = self._mast_results(remote_dir)
        return remote_dir, job_id, job_name, content
identifier_body
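This `identifier_body` record hides the body of `mast_compute`, which wires together `_mast_prep`, `_mast_submit`, `_batch_job_status`, and `_mast_results`. A hypothetical usage sketch of the non-blocking path, mirroring what `_mast` does; the queue, bucket, column names, and subset labels below are placeholder assumptions, not values taken from the source:

```python
from anndata import AnnData

def run_per_subset(bm, adata: AnnData, by: str, subsets, group: str) -> dict:
    # Submit one MAST job per subset without blocking, as _mast does.
    collection = {}
    for b in subsets:
        adata_b = adata[adata.obs[by] == b].copy()
        remote_dir, job_id, job_name, _ = bm.mast_compute(
            adata_b,
            keys=[group, 'n_genes'],  # n_genes is always expected as a covariate
            group=group,
            block=False,
        )
        collection[job_id] = {'group': b, 'remote_dir': remote_dir}

    # Drain the jobs as they finish; content is the MAST output as a DataFrame.
    results = {}
    for job_id, status, meta, content in bm.mast_collect(collection, wait=30):
        if status == 'SUCCEEDED':
            results[meta['group']] = content
        else:
            print(f"job {job_id} for subset {meta['group']} failed")
    return results

# e.g. run_per_subset(BatchMAST(queue, job_def, bucket), adata,
#                     by='donor', subsets=['d1', 'd2'], group='condition')
```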
pybatch_mast.py
"""Main module.""" from typing import Optional, Generator, Tuple, List, Sequence, Dict, Any, Union from anndata import AnnData from pandas import DataFrame import boto3 as bt from botocore.exceptions import ClientError import os import pandas as pd import scanpy as sc import tempfile import time import uuid class MASTCollectionError(Exception): def __init__( self, message: str, job_collection, ): # Call the base class constructor with the parameters it needs super().__init__(message) self.jc = job_collection class BatchMAST(): def __init__( self, job_queue: str, job_def: str, bucket: str, layer: str = 'counts', ): self.job_queue = job_queue self.job_def = job_def self.bucket = bucket self.layer = layer def mast( self, adata: AnnData, keys: Sequence[str], group: str, fdr: float, lfc: float, covs: str = '', bys: Optional[Sequence[Tuple[str, Sequence[str]]]] = None, min_perc: Optional[Union[float, Dict[str, float]]] = None, on_total: Optional[bool] = False, min_cells_limit: Optional[int] = 3, jobs: int = 1, ) -> Generator[ Tuple[ Dict[str, DataFrame], Dict[str, Dict[str, List[str]]], Optional[str], ], None, None, ]: # NOTE n_genes is always assumed as covariate if bys is None: if min_perc is not None: adata = adata.copy() if on_total: total_cells = adata.shape[0] else: total_cells = adata.obs[group].value_counts().min() min_cells = max(total_cells * min_perc, min_cells_limit) print( f'Filtering genes detected in fewer than {min_cells} cells' ) sc.pp.filter_genes(adata, min_cells=min_cells) enough_genes = adata.shape[1] > 0 job_collection = {} if enough_genes: job_collection = self._mast( job_collection, adata, covs, group, keys, jobs=jobs, ) else: print('Not enough genes, computation skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, None else: for by, groups in bys: job_collection = {} for b in groups: adata_b = adata[adata.obs[by] == b].copy() if min_perc is not None: if on_total: total_cells = adata_b.shape[0] else: total_cells = adata_b.obs[group].value_counts( ).min() min_cells = max( total_cells * min_perc[b], min_cells_limit ) print( 'Filtering genes detected in fewer ' f'than {min_cells} cells' ) sc.pp.filter_genes( adata_b, min_cells=min_cells, ) enough_groups = ( adata_b.obs[group].value_counts() >= 3 ).sum() > 1 enough_genes = adata_b.shape[1] > 0 if enough_groups and enough_genes: job_collection = self._mast( job_collection, adata_b, covs, group, keys, by=by, b=b, jobs=jobs, ) else: print(f'Computation for {b} skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, by def _mast( self, job_collection: Dict[str, Dict[str, str]], adata: AnnData, covs: str, group: str, keys: Sequence[str], by: Optional[str] = None, b: Optional[str] = None, jobs: int = 1, ) -> Dict[str, Dict[str, str]]: if by is None: b = 'Sheet0' new_covs = BatchMAST._clean_covs(adata, covs, group, by=by) remote_dir, job_id, job_name, content = self.mast_compute( adata, keys, group=group, covs=new_covs, block=False, jobs=jobs, ) job_collection[job_id] = {'group': b, 'remote_dir': remote_dir} return job_collection def mast_prep_output( self, job_collection: Dict[str, Dict[str, str]], lfc: float, fdr: float, wait: float = 30, ) -> 
Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]: de = {} top = {} for job_id, status, metadata, content in self.mast_collect( job_collection, wait=wait, ): if status == 'SUCCEEDED': b = metadata['group'] de[b] = content elif status == 'FAILED': print(f'Job Failed: group {metadata["group"]}') else: raise NotImplementedError(f'Status {status} not managed') top = BatchMAST.mast_filter(de, lfc, fdr) return de, top @staticmethod def mast_filter( de: Dict[str, DataFrame], lfc: float, fdr: float, ) -> Dict[str, Dict[str, List[str]]]: top = {} for b in de.keys(): cols = [ '_'.join(c.split('_')[:-1]) for c in de[b].columns[de[b].columns.str.endswith('_coef')] ] top[b] = {} for c in cols: top[b][c] = de[b][ (de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc) ].sort_values([ f'{c}_fdr', f'{c}_coef' ], ascending=[True, False]).index.tolist() return top @staticmethod def _clean_covs( adata: AnnData, covs: str, group: str, by: Optional[str] = None, ) -> str: covs_s = covs.split('+')[1:] new_covs = '' for c in covs_s: # Including only covariates with more than 1 level # Otherwise, MAST will stop execution with error: # contrasts can be applied only to factors with 2 or more levels if c not in (group, by) and adata.obs[c].nunique() > 1: new_covs += f'+{c}' return new_covs def _mast_prep( self, adata: AnnData, remote_dir: str, keys: Sequence[str], group: str, covs: str = '', ready: Optional[Sequence[str]] = None, jobs: int = 1, ) -> str: s3 = bt.resource('s3') if ready is None: ready = [] with tempfile.TemporaryDirectory() as td: if 'mat' not in ready: local_mat = os.path.join(td, 'mat.fth') adata = adata.copy() adata.X = adata.layers[self.layer] sc.pp.normalize_total(adata, target_sum=1e6) sc.pp.log1p(adata, base=2) adata.to_df().reset_index().to_feather( local_mat, compression='uncompressed', ) remote_mat = os.path.join(remote_dir, 'mat.fth') print(f'Uploading matrix ({adata.shape}) to s3...') s3.meta.client.upload_file(local_mat, self.bucket, remote_mat) if 'cdat' not in ready: local_cdat = os.path.join(td, 'cdat.csv') adata.obs[keys].to_csv(local_cdat) remote_cdat = os.path.join(remote_dir, 'cdat.csv') print('Uploading metadata to s3...') s3.meta.client.upload_file( local_cdat, self.bucket, remote_cdat) remote = os.path.join(self.bucket, remote_dir) manifest = '\n'.join([ f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv', 'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv', f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}', ]) local_manifest = os.path.join(td, 'manifest.txt') with open(local_manifest, 'w') as m: m.write(manifest + '\n') remote_manifest = os.path.join(remote_dir, 'manifest.txt')
local_manifest, self.bucket, remote_manifest, ) return remote_manifest def _mast_submit( self, manifest: str, block: bool = False, job_name: str = 'mast', ) -> str: batch = bt.client('batch') job_manifest = f's3://{os.path.join(self.bucket, manifest)}' job_id = None try: print( f'Submitting job {job_name} to the job queue {self.job_queue}' ) submit_job_response = batch.submit_job( jobName=job_name, jobQueue=self.job_queue, jobDefinition=self.job_def, containerOverrides={'command': [job_manifest]} ) job_id = submit_job_response['jobId'] print( f'Submitted job {job_name} {job_id} to the job queue' f' {self.job_queue}' ) except Exception as err: print(f'error: {str(err)}') return job_id def mast_compute( self, adata: AnnData, keys: Sequence[str], group: str, covs: str = '', block: bool = False, remote_dir: Optional[str] = None, jobs: int = 1, ) -> Tuple[str, str, str, Optional[DataFrame]]: content = None if remote_dir is None: remote_dir = os.path.join('mast', str(uuid.uuid4())) ready = [] else: ready = ['mat', 'cdat'] manifest = self._mast_prep( adata, remote_dir, keys, group, covs=covs, ready=ready, jobs=jobs, ) job_name = f'mast-{"".join(filter(str.isalnum, group))}-{"".join(filter(str.isalnum, covs))}' job_id = self._mast_submit( manifest, block=block, job_name=job_name, ) if block: status = BatchMAST._batch_job_status(job_id, wait=60) if status == 'SUCCEEDED': content = self._mast_results(remote_dir) return remote_dir, job_id, job_name, content def mast_collect( self, collection: Dict[str, Dict[str, str]], wait: int = 30, ) -> Generator[ Tuple[str, str, Dict[str, str], Optional[DataFrame]], None, None, ]: while wait and len(collection) > 0: time.sleep(wait) for job_id in list(collection.keys()): status = BatchMAST._batch_job_status(job_id) if status == 'SUCCEEDED': remote_dir = collection[job_id]['remote_dir'] content = self._mast_results(remote_dir) yield job_id, status, collection.pop(job_id), content elif status == 'FAILED': yield job_id, status, collection.pop(job_id), None @staticmethod def _batch_job_status( job_id: str, wait: int = 0, verbose: bool = False, ) -> str: batch = bt.client('batch') describe_jobs_response = batch.describe_jobs(jobs=[job_id]) status = describe_jobs_response['jobs'][0]['status'] if verbose: print(status) while wait: time.sleep(wait) describe_jobs_response = batch.describe_jobs(jobs=[job_id]) new_status = describe_jobs_response['jobs'][0]['status'] if new_status != status: status = new_status if verbose: print(status) if status == 'SUCCEEDED' or status == 'FAILED': break return status def _mast_results( self, remote_dir: str, ) -> DataFrame: s3 = bt.resource('s3') with tempfile.TemporaryDirectory() as td: remote_out = os.path.join(remote_dir, 'out.csv') local_out = os.path.join(td, 'out.csv') s3.Bucket(self.bucket).download_file(remote_out, local_out) content = pd.read_csv(local_out, index_col=0) return content @staticmethod def mast_to_excel( de: Dict[str, DataFrame], fname: str, top: Optional[Dict[str, Dict[str, List[str]]]] = None, top_prefix: str = None, only_top: bool = False, ): if not only_top: writer = pd.ExcelWriter( f'{fname}.xlsx', engine='xlsxwriter' ) for s in de.keys(): de[s].to_excel(writer, sheet_name=str(s)) writer.save() if top is not None: if top_prefix is None: top_prefix = '' else: top_prefix = f'{top_prefix}.' writer = pd.ExcelWriter( f'{fname}.{top_prefix}top.xlsx', engine='xlsxwriter' ) for s in top.keys(): pd.DataFrame.from_dict( top[s], orient='index', ).T.fillna('').to_excel( writer, sheet_name=str(s), index=False ) writer.save()
            print('Uploading manifest to s3...')
            s3.meta.client.upload_file(
random_line_split
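The `random_line_split` record above takes two adjacent source lines (the upload log message and the opening of the `upload_file` call) as its middle. A minimal sketch, under my own assumptions about window size, of how such a span can be carved out of any source file so that prefix + middle + suffix reproduces it exactly:

```python
import random

def make_random_line_split(source: str, max_span: int = 2, seed: int = 0) -> dict:
    """Pick a short contiguous run of lines as the middle; the rest becomes prefix/suffix."""
    lines = source.splitlines(keepends=True)
    rng = random.Random(seed)
    start = rng.randrange(len(lines))
    end = min(len(lines), start + rng.randint(1, max_span))
    return {
        "prefix": "".join(lines[:start]),
        "middle": "".join(lines[start:end]),
        "suffix": "".join(lines[end:]),
        "fim_type": "random_line_split",
    }

sample = "a = 1\nb = 2\nprint(a + b)\n"
rec = make_random_line_split(sample)
assert rec["prefix"] + rec["middle"] + rec["suffix"] == sample
```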
pybatch_mast.py
"""Main module.""" from typing import Optional, Generator, Tuple, List, Sequence, Dict, Any, Union from anndata import AnnData from pandas import DataFrame import boto3 as bt from botocore.exceptions import ClientError import os import pandas as pd import scanpy as sc import tempfile import time import uuid class MASTCollectionError(Exception): def __init__( self, message: str, job_collection, ): # Call the base class constructor with the parameters it needs super().__init__(message) self.jc = job_collection class BatchMAST(): def __init__( self, job_queue: str, job_def: str, bucket: str, layer: str = 'counts', ): self.job_queue = job_queue self.job_def = job_def self.bucket = bucket self.layer = layer def mast( self, adata: AnnData, keys: Sequence[str], group: str, fdr: float, lfc: float, covs: str = '', bys: Optional[Sequence[Tuple[str, Sequence[str]]]] = None, min_perc: Optional[Union[float, Dict[str, float]]] = None, on_total: Optional[bool] = False, min_cells_limit: Optional[int] = 3, jobs: int = 1, ) -> Generator[ Tuple[ Dict[str, DataFrame], Dict[str, Dict[str, List[str]]], Optional[str], ], None, None, ]: # NOTE n_genes is always assumed as covariate if bys is None: if min_perc is not None: adata = adata.copy() if on_total: total_cells = adata.shape[0] else: total_cells = adata.obs[group].value_counts().min() min_cells = max(total_cells * min_perc, min_cells_limit) print( f'Filtering genes detected in fewer than {min_cells} cells' ) sc.pp.filter_genes(adata, min_cells=min_cells) enough_genes = adata.shape[1] > 0 job_collection = {} if enough_genes: job_collection = self._mast( job_collection, adata, covs, group, keys, jobs=jobs, ) else:
try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, None else: for by, groups in bys: job_collection = {} for b in groups: adata_b = adata[adata.obs[by] == b].copy() if min_perc is not None: if on_total: total_cells = adata_b.shape[0] else: total_cells = adata_b.obs[group].value_counts( ).min() min_cells = max( total_cells * min_perc[b], min_cells_limit ) print( 'Filtering genes detected in fewer ' f'than {min_cells} cells' ) sc.pp.filter_genes( adata_b, min_cells=min_cells, ) enough_groups = ( adata_b.obs[group].value_counts() >= 3 ).sum() > 1 enough_genes = adata_b.shape[1] > 0 if enough_groups and enough_genes: job_collection = self._mast( job_collection, adata_b, covs, group, keys, by=by, b=b, jobs=jobs, ) else: print(f'Computation for {b} skipped') try: de, top = self.mast_prep_output(job_collection, lfc, fdr) except ClientError as e: raise MASTCollectionError(e, job_collection) from e except Exception as e: raise MASTCollectionError(e.message, job_collection) from e yield de, top, by def _mast( self, job_collection: Dict[str, Dict[str, str]], adata: AnnData, covs: str, group: str, keys: Sequence[str], by: Optional[str] = None, b: Optional[str] = None, jobs: int = 1, ) -> Dict[str, Dict[str, str]]: if by is None: b = 'Sheet0' new_covs = BatchMAST._clean_covs(adata, covs, group, by=by) remote_dir, job_id, job_name, content = self.mast_compute( adata, keys, group=group, covs=new_covs, block=False, jobs=jobs, ) job_collection[job_id] = {'group': b, 'remote_dir': remote_dir} return job_collection def mast_prep_output( self, job_collection: Dict[str, Dict[str, str]], lfc: float, fdr: float, wait: float = 30, ) -> Tuple[DataFrame, Dict[str, Dict[str, List[str]]]]: de = {} top = {} for job_id, status, metadata, content in self.mast_collect( job_collection, wait=wait, ): if status == 'SUCCEEDED': b = metadata['group'] de[b] = content elif status == 'FAILED': print(f'Job Failed: group {metadata["group"]}') else: raise NotImplementedError(f'Status {status} not managed') top = BatchMAST.mast_filter(de, lfc, fdr) return de, top @staticmethod def mast_filter( de: Dict[str, DataFrame], lfc: float, fdr: float, ) -> Dict[str, Dict[str, List[str]]]: top = {} for b in de.keys(): cols = [ '_'.join(c.split('_')[:-1]) for c in de[b].columns[de[b].columns.str.endswith('_coef')] ] top[b] = {} for c in cols: top[b][c] = de[b][ (de[b][f'{c}_fdr'] < fdr) & (de[b][f'{c}_coef'] > lfc) ].sort_values([ f'{c}_fdr', f'{c}_coef' ], ascending=[True, False]).index.tolist() return top @staticmethod def _clean_covs( adata: AnnData, covs: str, group: str, by: Optional[str] = None, ) -> str: covs_s = covs.split('+')[1:] new_covs = '' for c in covs_s: # Including only covariates with more than 1 level # Otherwise, MAST will stop execution with error: # contrasts can be applied only to factors with 2 or more levels if c not in (group, by) and adata.obs[c].nunique() > 1: new_covs += f'+{c}' return new_covs def _mast_prep( self, adata: AnnData, remote_dir: str, keys: Sequence[str], group: str, covs: str = '', ready: Optional[Sequence[str]] = None, jobs: int = 1, ) -> str: s3 = bt.resource('s3') if ready is None: ready = [] with tempfile.TemporaryDirectory() as td: if 'mat' not in ready: local_mat = os.path.join(td, 'mat.fth') adata = adata.copy() adata.X = adata.layers[self.layer] sc.pp.normalize_total(adata, target_sum=1e6) sc.pp.log1p(adata, 
base=2) adata.to_df().reset_index().to_feather( local_mat, compression='uncompressed', ) remote_mat = os.path.join(remote_dir, 'mat.fth') print(f'Uploading matrix ({adata.shape}) to s3...') s3.meta.client.upload_file(local_mat, self.bucket, remote_mat) if 'cdat' not in ready: local_cdat = os.path.join(td, 'cdat.csv') adata.obs[keys].to_csv(local_cdat) remote_cdat = os.path.join(remote_dir, 'cdat.csv') print('Uploading metadata to s3...') s3.meta.client.upload_file( local_cdat, self.bucket, remote_cdat) remote = os.path.join(self.bucket, remote_dir) manifest = '\n'.join([ f'WORKSPACE={remote}', 'BATCH_INDEX_OFFSET=0', 'CDAT=cdat.csv', 'MAT=mat.fth', f'GROUP={group}', 'OUT_NAME=out.csv', f'MODEL=\'~group+n_genes{covs}\'', f'JOBS={jobs}', ]) local_manifest = os.path.join(td, 'manifest.txt') with open(local_manifest, 'w') as m: m.write(manifest + '\n') remote_manifest = os.path.join(remote_dir, 'manifest.txt') print('Uploading manifest to s3...') s3.meta.client.upload_file( local_manifest, self.bucket, remote_manifest, ) return remote_manifest def _mast_submit( self, manifest: str, block: bool = False, job_name: str = 'mast', ) -> str: batch = bt.client('batch') job_manifest = f's3://{os.path.join(self.bucket, manifest)}' job_id = None try: print( f'Submitting job {job_name} to the job queue {self.job_queue}' ) submit_job_response = batch.submit_job( jobName=job_name, jobQueue=self.job_queue, jobDefinition=self.job_def, containerOverrides={'command': [job_manifest]} ) job_id = submit_job_response['jobId'] print( f'Submitted job {job_name} {job_id} to the job queue' f' {self.job_queue}' ) except Exception as err: print(f'error: {str(err)}') return job_id def mast_compute( self, adata: AnnData, keys: Sequence[str], group: str, covs: str = '', block: bool = False, remote_dir: Optional[str] = None, jobs: int = 1, ) -> Tuple[str, str, str, Optional[DataFrame]]: content = None if remote_dir is None: remote_dir = os.path.join('mast', str(uuid.uuid4())) ready = [] else: ready = ['mat', 'cdat'] manifest = self._mast_prep( adata, remote_dir, keys, group, covs=covs, ready=ready, jobs=jobs, ) job_name = f'mast-{"".join(filter(str.isalnum, group))}-{"".join(filter(str.isalnum, covs))}' job_id = self._mast_submit( manifest, block=block, job_name=job_name, ) if block: status = BatchMAST._batch_job_status(job_id, wait=60) if status == 'SUCCEEDED': content = self._mast_results(remote_dir) return remote_dir, job_id, job_name, content def mast_collect( self, collection: Dict[str, Dict[str, str]], wait: int = 30, ) -> Generator[ Tuple[str, str, Dict[str, str], Optional[DataFrame]], None, None, ]: while wait and len(collection) > 0: time.sleep(wait) for job_id in list(collection.keys()): status = BatchMAST._batch_job_status(job_id) if status == 'SUCCEEDED': remote_dir = collection[job_id]['remote_dir'] content = self._mast_results(remote_dir) yield job_id, status, collection.pop(job_id), content elif status == 'FAILED': yield job_id, status, collection.pop(job_id), None @staticmethod def _batch_job_status( job_id: str, wait: int = 0, verbose: bool = False, ) -> str: batch = bt.client('batch') describe_jobs_response = batch.describe_jobs(jobs=[job_id]) status = describe_jobs_response['jobs'][0]['status'] if verbose: print(status) while wait: time.sleep(wait) describe_jobs_response = batch.describe_jobs(jobs=[job_id]) new_status = describe_jobs_response['jobs'][0]['status'] if new_status != status: status = new_status if verbose: print(status) if status == 'SUCCEEDED' or status == 'FAILED': break return status def 
_mast_results( self, remote_dir: str, ) -> DataFrame: s3 = bt.resource('s3') with tempfile.TemporaryDirectory() as td: remote_out = os.path.join(remote_dir, 'out.csv') local_out = os.path.join(td, 'out.csv') s3.Bucket(self.bucket).download_file(remote_out, local_out) content = pd.read_csv(local_out, index_col=0) return content @staticmethod def mast_to_excel( de: Dict[str, DataFrame], fname: str, top: Optional[Dict[str, Dict[str, List[str]]]] = None, top_prefix: str = None, only_top: bool = False, ): if not only_top: writer = pd.ExcelWriter( f'{fname}.xlsx', engine='xlsxwriter' ) for s in de.keys(): de[s].to_excel(writer, sheet_name=str(s)) writer.save() if top is not None: if top_prefix is None: top_prefix = '' else: top_prefix = f'{top_prefix}.' writer = pd.ExcelWriter( f'{fname}.{top_prefix}top.xlsx', engine='xlsxwriter' ) for s in top.keys(): pd.DataFrame.from_dict( top[s], orient='index', ).T.fillna('').to_excel( writer, sheet_name=str(s), index=False ) writer.save()
print('Not enough genes, computation skipped')
conditional_block
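Here the `conditional_block` middle is the body of the `else:` branch that logs the skip. For Python sources, branch bodies like this can be located with the standard-library `ast` module; the sketch below is illustrative, and the sample snippet is a toy stand-in for the real file:

```python
import ast

def conditional_block_spans(source: str):
    """Yield 1-based (start_line, end_line) spans for each if/else branch body."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.If):
            yield node.body[0].lineno, node.body[-1].end_lineno
            if node.orelse:
                yield node.orelse[0].lineno, node.orelse[-1].end_lineno

sample = (
    "if enough_genes:\n"
    "    run_job()\n"
    "else:\n"
    "    print('skipped')\n"
)
lines = sample.splitlines(keepends=True)
for start, end in conditional_block_spans(sample):
    print("middle candidate:", "".join(lines[start - 1:end]).strip())
```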
imap-client.js
var IMAP_Client = Extends(TCP_Client, new Class({ USE_BLOCKS: true, mailboxes: { inbox: "inbox", outbox: "inbox.Sent", contacts: "inbox.Contacts" }, contact_fields: ["Name", "Long", "Short", "Type"], initialize: function(URL, port){ this.counter = 0; this.patterns = [ /\*\sOK\s\[CAPABILITY.*\].*/, this.onConnectEstablished, /\*\sERROR\s.*/, this.onConnectFailed ]; this.URL = URL; this.port = port; this.queue = []; this.checkNewMail(); }, checkNewMail: function(){ if(this.connected) this.status(); setTimeout(F(this, this.checkNewMail), 60000); }, block: function(blocked, extra){ if(extra) this.blocked_by = extra; this.blocked = blocked; this.onStatus("block", blocked); }, // request = {command: ..., params: ..., .....} exec: function(request){ if(this.USE_BLOCKS && this.blocked){ this.queue.push(request); return; } this.counter++; var prefix = ("000" + String(this.counter)); prefix = prefix.substring(prefix.length - 3); prefix = "a" + prefix + " "; this.block(true, Apply(request, {prefix: prefix})); this.parser = new IMAP_Parser( request, prefix, F(this, this.onEvent), F(this, this.dropParser) ); this.write(prefix + request.command + " " + (request.params || "")); }, releaseQueue: function(){ var request = this.queue.shift(); if(request) this.exec(request); }, dropParser: function(){ this.parser = null; }, onEvent: function(event, message, data, request){ log("onEvent: " + event); log(data); this.block(false); switch(event){ case "onLogin": this.onLogin(true); //this.select(); break; case "onLoginFailed": this.onLogin(false); break; case "onStatus": this.onUnseen(data[0].matches[0]); break; case "onSelect": var total = { exists: 0, recent: 0 }; for(var i = 0; i < data.length; i++){ for(var k in total){ if(data[i].name == k) total[k] = data[i].matches; } } Each(this.mailboxes, function(m, k){ if(request.params == m) this.mailbox = k; }, this); this.onMailboxTotal(this.mailbox, total.exists, total.recent); break; case "noMailbox": this.createMailbox(request.params); break; case "onCreate": this.select(request.params); break; case "onFetch": var list = []; var mail = {}; for(var i = 0; i < data.length; i++){ if(data[i].name == "list"){ var mail = this.parseEnvelope(data[i].matches); //var headers = this.parseHeaders(data[i].data); list.push(mail); } else if(data[i].name == "contact"){ var item = { id: data[i].matches[0] }; var contact = this.parseContacts(data[i].data); contact.Mail = this.cutFreemailAddress(contact.Long); log(contact); list.push(Apply(item, contact)); } else if(data[i].name == "mail"){ var n = data[i].data.indexOf("\r\n\r\n"); mail.headers = this.parseHeaders( data[i].data.substring(0, n) ); var body = data[i].data.substring(n + 4); if(mail.headers["Content-Type"] && mail.headers["Content-Type"].match(/^Multipart\/Mixed/)){ var boundary, m; if(m = /boundary="(.*?)"/.exec(mail.headers["Content-Type"])){ boundary = m[1]; var parts = this.parseAttachments(boundary, body); mail.body = parts[1].body; mail.attachments = parts.slice(2); } } else { mail.body = body; } } } if(request.type == "mail") this.onFetch(Apply(mail, {mailbox: this.mailbox})); else this.onMailboxData(list, request.mailbox); break; /* case "onStore": this.expunge(); break; */ } if(request.callback) request.callback(); if(!this.blocked) this.releaseQueue(); }, parseAttachments: function(boundary, body){ var parts = body.split("--" + boundary); for(var k in parts){ var n = parts[k].indexOf("\r\n\r\n"); var headers = this.parseHeaders( parts[k].substring(0, n) ); var body = parts[k].substring(n + 4); parts[k] = { 
body: body, headers: headers }; } return parts; }, parseHeaders: function(headers){ // From: Dave Baker <[email protected]>\r\nSubject: Welcome to Freemail!\r\n\r\n var fields = ["To", "From", "Subject", "Date", "Content-Type"]; var result = {}; var m; for(var k in fields){ var re = new RegExp("^" + fields[k] + ":\\s?(.*)", "m"); if(m = re.exec(headers)) result[fields[k]] = m[1];
parseEnvelope: function(matches){ var fields = qw("id flags date subject from to"); var data = {}; Every(fields, function(k, i){ if(k == "from" || k == "to"){ var m = /(NIL|\".*?\")\s+(NIL|\".*?\")\s+(NIL|\".*?\")\s+(NIL|\".*?\")/ .exec(matches[i]).slice(1); Every(m, function(v, i){ m[i] = v == "NIL" ? null : v.substring(0, v.length - 1).substring(1); }); data[k] = { name: m[0], mail: this.cutFreemailAddress(m[2] + "@" + m[3]), }; } else if(k == "date" || k == "subject"){ if(matches[i] != "NIL") data[k] = matches[i].substring(0, matches[i].length - 1).substring(1); } else if(k == "flags"){ var f = matches[i].split(/\s+/); data[k] = {}; Every(f, function(v){ if(v) data[k][v.substring(1)] = true; }); } else { data[k] = matches[i]; } }, this); return data; }, parseContacts: function(data){ var result = {}; var m; var n = data.indexOf("\r\n\r\n"); result.Notes = data.substring(n + 4); data = data.substring(0, n); for(var k in this.contact_fields){ var re = new RegExp("^" + this.contact_fields[k] + ":\\s(.*)", "m"); if(m = re.exec(data)) result[this.contact_fields[k]] = m[1]; } return result; }, cutFreemailAddress: function(address){ if(address && (m = /([\w\d\_]+\@)([\w\d\_]{30,154})(\.freemail)/.exec(address))){ var key = m[2]; //var cutted = m[1] + key.substring(0, 3) + "..." + key.substring(key.length - 3, key.length) + m[3]; var cutted = m[1] + key.substring(0, 6) + ".." + m[3]; address = address.replace(m[0], cutted); } return address; }, // ****** Handlers ******* onConnect: function(){ //log("onConnect"); this.onStatus("connected"); }, onConnectFailed: function(){ //log("onConnectFailed"); this.onStatus("failed"); }, onLogin: function(logged){ //log(logged ? "Logged in" : "Login failed"); this.status(); this.onStatus("logged", logged); }, onMailboxTotal: function(mailbox, exists, recent){ //log("exists/recent = " + exists + "/" + recent); this.onStatus("total", { exists: exists, recent: recent, mailbox: mailbox }); }, onMailboxData: function(list, mailbox){ //log(list); this.onStatus("data", { list: list, mailbox: mailbox }); }, onFetch: function(mail){ //log(mail); this.onStatus("mail", mail); }, onConnectEstablished: function(){ this.onStatus("ready"); if(this.auth) this.login(this.auth.login, this.auth.password); }, onUnseen: function(count){ this.onStatus("unseen", count); }, onDisconnect: function(){ this.block(false); this.parser = null; }, onStatus: function(status, data){}, // OCERRIDE // ******* PUBLIC ******** login: function(login, password){ this.auth = { login: login, password: password }; if(this.connected){ log("Logging with session " + this.session + "... "); this.exec({ command: "LOGIN", params: login + " " + password }); } else { /* this.onConnectEstablished = F(this, function(){ this.onStatus("ready"); this.login(); }); */ this.connect(); } }, logout: function(){ this.auth = null; this.exec({ command: "LOGOUT" }); this.disconnect(); }, // mailbox = inbox | inbox.Sent | inbox.Contacts select: function(mailbox, callback){ //this.mailbox = mailbox || this.mailbox; this.exec({ command: "SELECT", params: mailbox || this.mailbox, callback: F(this, this.expunge, [callback]) }); }, fetchMailbox: function(name, callback){ var mailbox = this.mailboxes[name]; this.select(mailbox, name == "contacts" ? 
F(this,this.fetchContacts, [callback]) : F(this, this.fetchAll, [name, callback]) ); }, fetchAll: function(mailbox, callback){ this.exec({ command: "FETCH", //params: "1:* (FLAGS BODY[HEADER.FIELDS (FROM SUBJECT DATE)])", params: "1:* (FLAGS ENVELOPE)", mailbox: mailbox, callback: callback }); }, fetch: function(n, mailbox){ var _fetch = F(this, this.exec, [{ command: "FETCH", params: n + " (FLAGS INTERNALDATE ENVELOPE BODY[])", mailbox: mailbox, type: "mail" }]); if(this.mailbox == mailbox) _fetch(); else this.select(mailbox, _fetch); }, fetchContacts: function(callback){ this.exec({ command: "FETCH", params: "1:* (BODY[])", mailbox: "contacts", callback: callback }); }, saveContact: function(contact, callback){ var body = "\r\n"; Every(this.contact_fields, function(name){ if(contact[name]) body += name + ": " + contact[name] + "\r\n"; }); body += "\r\n" + contact.Notes; /* SELECT, EXPUNGE ? STORE n FLAGS+ (\Deleted) ? EXPUNGE APPEND FETCH */ var append = F(this, this.append, [ this.mailboxes.contacts, body, F(this, this.fetchContacts, [callback]) ]); var remove = F(this, this.remove, [ contact.id, F(this, this.expunge, [append]) ]); this.select(this.mailboxes.contacts, contact.id == null ? append : remove); }, removeContact: function(id, callback){ /* SELECT, EXPUNGE STORE n FLAGS+ (\Deleted) EXPUNGE FETCH */ this.select(this.mailboxes.contacts, F(this, this.remove, [ id, /*F(this, this.expunge, [ F(this, this.fetchContacts, [callback]) ])*/ callback ])); }, addMail: function(mailbox, mail){ var mailbody = "\r\n"; for(var k in mail.headers){ mailbody += k + ": " + mail.headers[k] + "\r\n"; } mailbody += "\r\n" + mail.text; log("trying to save outgoing mail: " + mailbody); this.append(this.mailboxes[mailbox], mailbody); }, append: function(mailbox, mail, callback){ this.exec({ command: "APPEND", params: mailbox + " (\\Seen) {" + this.length2(mail) + "}" + mail, callback: callback }); }, length2: function(s){ var len = 0; for(var i = 0; i < s.length; i++){ if(s.charCodeAt(i) >= 128) len += 2; else len++; } //alert(len + " / " + s.length); return len; }, remove: function(id, callback){ this.store(id, true, "Deleted", callback); /* this.exec({ command: "STORE", params: id + " +FLAGS (\\Deleted)", callback: callback }); */ }, store: function(id, add, flag, callback){ this.exec({ command: "STORE", params: id + " " + (add ? "+" : "-") + "FLAGS (\\" + flag + ")", callback: callback }); }, expunge: function(callback){ this.exec({ command: "EXPUNGE", callback: callback }); }, createMailbox: function(mailbox){ this.exec({ command: "CREATE", params: mailbox }); }, status: function(){ this.exec({ command: "STATUS", params: this.mailboxes.inbox + " (Unseen)" }); } }));
} return result; },
random_line_split
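The JavaScript record above splits `parseHeaders` mid-function. One detail worth making explicit is the `cutFreemailAddress` helper that `parseEnvelope` and the contact parser rely on: the long 30-154 character Freemail key between `@` and `.freemail` is trimmed to its first six characters. A Python re-implementation sketch of that rule; the example address is made up:

```python
import re

def cut_freemail_address(address: str) -> str:
    # Same pattern as the JS helper: local part, long key, '.freemail' suffix.
    m = re.search(r"([\w\d_]+@)([\w\d_]{30,154})(\.freemail)", address or "")
    if not m:
        return address
    shortened = f"{m.group(1)}{m.group(2)[:6]}..{m.group(3)}"
    return address.replace(m.group(0), shortened)

print(cut_freemail_address("dave@" + "k" * 40 + ".freemail"))
# -> dave@kkkkkk...freemail
```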
kubernetes.go
// Copyright (c) 2017 Pulcy. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "encoding/json" "fmt" "io/ioutil" "net" "net/http" "net/url" "os" "strconv" "sync" "github.com/juju/errgo" "github.com/op/go-logging" "github.com/spf13/pflag" k8s "github.com/YakLabs/k8s-client" k8s_http "github.com/YakLabs/k8s-client/http" api "github.com/pulcy/prometheus-conf-api" "github.com/pulcy/prometheus-conf/service" "github.com/pulcy/prometheus-conf/util" ) var ( maskAny = errgo.MaskFunc(errgo.Any) ) const ( logName = "kubernetes" metricsAnnotation = "j2.pulcy.com/metrics" maxRecentErrors = 30 ) type k8sPlugin struct { LogLevel string ETCDTLSConfig service.TLSConfig KubeletTLSConfig service.TLSConfig log *logging.Logger client k8s.Client lastUpdate *k8sUpdate recentErrors int nodeExporterPort int } type k8sUpdate struct { log *logging.Logger nodeExporterPort int nodes []k8s.Node services []k8s.Service etcdTLSConfig service.TLSConfig kubeletTLSConfig service.TLSConfig } func init() { service.RegisterPlugin("kubernetes", &k8sPlugin{ log: logging.MustGetLogger(logName), }) } // Configure the command line flags needed by the plugin. func (p *k8sPlugin) Setup(flagSet *pflag.FlagSet) { flagSet.StringVar(&p.ETCDTLSConfig.CAFile, "kubernetes-etcd-ca-file", "", "CA certificate used by ETCD") flagSet.StringVar(&p.ETCDTLSConfig.CertFile, "kubernetes-etcd-cert-file", "", "Public key file used by ETCD") flagSet.StringVar(&p.ETCDTLSConfig.KeyFile, "kubernetes-etcd-key-file", "", "Private key file used by ETCD") flagSet.StringVar(&p.KubeletTLSConfig.CAFile, "kubelet-ca-file", "", "CA certificate used by Kubelet") flagSet.StringVar(&p.KubeletTLSConfig.CertFile, "kubelet-cert-file", "", "Public key file used by Kubelet") flagSet.StringVar(&p.KubeletTLSConfig.KeyFile, "kubelet-key-file", "", "Private key file used by Kubelet") flagSet.StringVar(&p.LogLevel, "kubernetes-log-level", "", "Log level of kubernetes plugin") } // Start the plugin. Send a value on the given channel to trigger an update of the configuration. 
func (p *k8sPlugin) Start(config service.ServiceConfig, trigger chan string) error { if err := util.SetLogLevel(p.LogLevel, config.LogLevel, logName); err != nil { return maskAny(err) } // Setup kubernetes client p.nodeExporterPort = config.NodeExporterPort c, err := k8s_http.NewInCluster() if err != nil { p.log.Infof("No kubernetes available: %v", err) return nil } p.client = c // Watch nodes for changes go func() { for { nodeEvents := make(chan k8s.NodeWatchEvent) go func() { for evt := range nodeEvents { if evt.Type() == k8s.WatchEventTypeAdded || evt.Type() == k8s.WatchEventTypeDeleted { p.log.Debugf("got node event of type %s", evt.Type()) trigger <- fmt.Sprintf("node-%s", evt.Type()) } } }() if err := p.client.WatchNodes(nil, nodeEvents); err != nil { p.log.Errorf("failed to watch nodes: %#v", err) } } }() // Watch services for changes go func() { for { serviceEvents := make(chan k8s.ServiceWatchEvent) go func() { for evt := range serviceEvents { p.log.Debugf("got service event of type %s", evt.Type()) trigger <- fmt.Sprintf("service-%s", evt.Type()) } }() if err := p.client.WatchServices("", nil, serviceEvents); err != nil { p.log.Errorf("failed to watch services: %#v", err) } } }() // No custom triggers here, just update once in a while. return nil } func (p *k8sPlugin) Update() (service.PluginUpdate, error) { if p.client == nil { return nil, nil } // Get nodes p.log.Debugf("fetching kubernetes nodes") nodes, nodesErr := p.client.ListNodes(nil) // Get services p.log.Debugf("fetching kubernetes services") services, servicesErr := p.client.ListServices("", nil) if nodesErr != nil || servicesErr != nil { if nodesErr != nil { p.log.Warningf("Failed to fetch kubernetes nodes: %#v (using previous ones)", nodesErr) } if servicesErr != nil { p.log.Warningf("Failed to fetch kubernetes services: %#v (using previous ones)", servicesErr) } p.recentErrors++ if p.recentErrors > maxRecentErrors { p.log.Warningf("Too many recent kubernetes errors, restarting") os.Exit(1) } return p.lastUpdate, nil } else { p.recentErrors = 0 update := &k8sUpdate{ log: p.log, nodeExporterPort: p.nodeExporterPort, nodes: nodes.Items, services: services.Items, etcdTLSConfig: p.ETCDTLSConfig, kubeletTLSConfig: p.KubeletTLSConfig, } p.lastUpdate = update return update, nil } } // Extract data from fleet to create node_exporter targets func (p *k8sUpdate) CreateNodes() ([]service.ScrapeConfig, error) { // Build scrape config list scNode := service.StaticConfig{} scNode.Label("source", "node") scEtcd := service.StaticConfig{} scEtcd.Label("source", "etcd") for _, node := range p.nodes { for _, addr := range node.Status.Addresses { if addr.Type == "InternalIP" { ip := addr.Address p.log.Debugf("found kubernetes node %s", ip) scNode.Targets = append(scNode.Targets, fmt.Sprintf("%s:%d", ip, p.nodeExporterPort)) if node.Labels["core"] == "true" { scEtcd.Targets = append(scEtcd.Targets, fmt.Sprintf("%s:2379", ip)) } } } } scrapeConfigNode := service.ScrapeConfig{ JobName: "node", StaticConfigs: []service.StaticConfig{scNode}, RelabelConfigs: []service.RelabelConfig{ service.RelabelConfig{ SourceLabels: []string{"__address__"}, Action: "replace", TargetLabel: "instance", Regex: `(.+)(?::)(\d+)`, Replacement: "$1", }, service.RelabelConfig{ SourceLabels: []string{"__address__"}, Action: "replace", TargetLabel: "port", Regex: `(.+)(?::)(\d+)`, Replacement: "$2", }, }, } scrapeConfigETCD := service.ScrapeConfig{ JobName: "etcd", StaticConfigs: []service.StaticConfig{scEtcd}, RelabelConfigs: []service.RelabelConfig{ 
service.RelabelConfig{ Action: "labeldrop", Regex: "etcd_debugging.*", }, service.RelabelConfig{ SourceLabels: []string{"__address__"}, Action: "replace", TargetLabel: "instance", Regex: `(.+)(?::)(\d+)`, Replacement: "$1", }, service.RelabelConfig{ SourceLabels: []string{"__address__"}, Action: "replace", TargetLabel: "port", Regex: `(.+)(?::)(\d+)`, Replacement: "$2", }, }, } if p.etcdTLSConfig.IsConfigured() { scrapeConfigETCD.Scheme = "https" scrapeConfigETCD.TLSConfig = &service.TLSConfig{ CAFile: p.etcdTLSConfig.CAFile, CertFile: p.etcdTLSConfig.CertFile, KeyFile: p.etcdTLSConfig.KeyFile, InsecureSkipVerify: true, } } scrapeConfigK8sNodes := service.ScrapeConfig{ JobName: "kubernetes-nodes", ScrapeInterval: "5m", KubernetesConfigs: []service.KubernetesSDConfig{ service.KubernetesSDConfig{ Role: "node", }, }, RelabelConfigs: []service.RelabelConfig{ service.RelabelConfig{ Action: "labelmap", Regex: "__meta_kubernetes_node_label_(.+)", }, }, } if p.kubeletTLSConfig.IsConfigured() { scrapeConfigK8sNodes.Scheme = "https" scrapeConfigK8sNodes.TLSConfig = &service.TLSConfig{ CAFile: p.kubeletTLSConfig.CAFile, CertFile: p.kubeletTLSConfig.CertFile, KeyFile: p.kubeletTLSConfig.KeyFile, InsecureSkipVerify: true, } } scrapeConfigK8sEndpoinds := service.ScrapeConfig{ JobName: "kubernetes-endpoints", KubernetesConfigs: []service.KubernetesSDConfig{ service.KubernetesSDConfig{ Role: "endpoints", }, }, RelabelConfigs: []service.RelabelConfig{ service.RelabelConfig{ SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scrape"}, Action: "keep", Regex: "true", }, service.RelabelConfig{ SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_scheme"}, Action: "replace", TargetLabel: "__scheme__", Regex: "(https?)", }, service.RelabelConfig{ SourceLabels: []string{"__meta_kubernetes_service_annotation_prometheus_io_path"}, Action: "replace", TargetLabel: "__metrics_path__", Regex: "(.+)", }, service.RelabelConfig{ SourceLabels: []string{"__address__", "__meta_kubernetes_service_annotation_prometheus_io_port"}, Action: "replace", TargetLabel: "__address__", Regex: `(.+)(?::\d+);(\d+)`, Replacement: "$1:$2", }, service.RelabelConfig{ Action: "labelmap", Regex: "__meta_kubernetes_service_label_(.+)", }, service.RelabelConfig{ SourceLabels: []string{"__meta_kubernetes_namespace"}, Action: "replace", TargetLabel: "kubernetes_namespace", }, service.RelabelConfig{ SourceLabels: []string{"__meta_kubernetes_pod_name"}, Action: "replace", TargetLabel: "kubernetes_pod_name", }, service.RelabelConfig{ SourceLabels: []string{"__address__"}, Action: "replace", TargetLabel: "instance", Regex: `(.+)(?::)(\d+)`, Replacement: "$1", }, service.RelabelConfig{ SourceLabels: []string{"__address__"}, Action: "replace", TargetLabel: "port", Regex: `(.+)(?::)(\d+)`, Replacement: "$2", }, service.RelabelConfig{ SourceLabels: []string{"j2_job_name"}, Action: "replace", TargetLabel: "job", }, service.RelabelConfig{ SourceLabels: []string{"j2_taskgroup_name"}, Action: "replace", TargetLabel: "taskgroup", }, }, } return []service.ScrapeConfig{scrapeConfigNode, scrapeConfigETCD, scrapeConfigK8sNodes, scrapeConfigK8sEndpoinds}, nil } // CreateRules creates all rules this plugin is aware of. // The returns string list should contain the content of the various rules. 
func (p *k8sUpdate) CreateRules() ([]string, error) { // Build URL list var urls []string for _, svc := range p.services { ann, ok := svc.Annotations[metricsAnnotation] if !ok || ann == "" { continue } var metricsRecords []api.MetricsServiceRecord if err := json.Unmarshal([]byte(ann), &metricsRecords); err != nil { p.log.Errorf("Failed to unmarshal metrics annotation in service '%s.%s': %#v", svc.Namespace, svc.Name, err) continue } // Get service IP if svc.Spec.Type != k8s.ServiceTypeClusterIP { p.log.Errorf("Cannot put metrics rules in services of type other than ClusterIP ('%s.%s')", svc.Namespace, svc.Name) continue } clusterIP := svc.Spec.ClusterIP // Collect URLs for _, m := range metricsRecords { if m.RulesPath == ""
u := url.URL{ Scheme: "http", Host: net.JoinHostPort(clusterIP, strconv.Itoa(m.ServicePort)), Path: m.RulesPath, } urls = append(urls, u.String()) } } if len(urls) == 0 { return nil, nil } // Fetch rules from URLs rulesChan := make(chan string, len(urls)) wg := sync.WaitGroup{} for _, url := range urls { wg.Add(1) go func(url string) { defer wg.Done() p.log.Debugf("fetching rules from %s", url) resp, err := http.Get(url) if err != nil { p.log.Errorf("Failed to fetch rule from '%s': %#v", url, err) return } if resp.StatusCode < 200 || resp.StatusCode >= 300 { p.log.Errorf("Failed to fetch rule from '%s': Status %d", url, resp.StatusCode) return } defer resp.Body.Close() raw, err := ioutil.ReadAll(resp.Body) if err != nil { p.log.Errorf("Failed to read rule from '%s': %#v", url, err) return } rulesChan <- string(raw) p.log.Debugf("done fetching rules from %s", url) }(url) } wg.Wait() close(rulesChan) var result []string for rule := range rulesChan { result = append(result, rule) } p.log.Debugf("Found %d rules", len(result)) return result, nil }
{ continue }
conditional_block
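The Go record repeats one relabeling pattern several times: the regex `(.+)(?::)(\d+)` applied to `__address__`, with `$1` written to `instance` and `$2` to `port`. A small sketch of what that relabeling computes, assuming Prometheus's fully anchored regex semantics:

```python
import re

# Same expression as the RelabelConfig entries above; fullmatch mimics
# Prometheus anchoring the regex at both ends.
ADDRESS_RE = re.compile(r"(.+)(?::)(\d+)")

def relabel(address: str) -> dict:
    m = ADDRESS_RE.fullmatch(address)
    if not m:
        return {"instance": address}
    return {"instance": m.group(1), "port": m.group(2)}

print(relabel("10.0.0.12:9100"))  # {'instance': '10.0.0.12', 'port': '9100'}
```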