file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
main.rs | extern crate csv;
extern crate serde;
// This lets us write `#[derive(Deserialize)]`.
#[macro_use]
extern crate serde_derive;
use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io;
use std::{error::Error, ffi::OsString, process};
// fn main_not_recover() {
// println!("Hello, world!");
// let mut rds = csv::Reader::from_reader(io::stdin());
// for result in rds.records() {
// // expect panics when the result is an Error, so this is bad practice.
// let record = result.expect("a csv record");
// println!("{:?}", record);
// }
// }
fn main() {
println!("Hello, world!");
match performance_up_read_csv_to_model() {
Ok(count) => println!("{:?}", count),
Err(err) => {
println!("{}", err);
process::exit(1);
}
}
}
// Practicing error handling
fn main_recorver() {
println!("Hellssdfgsdf");
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
match result {
Ok(r) => println!("{:?}", r),
// Handling the Err arm this way makes the error recoverable.
Err(e) => println!("{:?}", e),
}
}
}
// read and write csv test
fn main_csv() {
println!("Hello, world!");
// With `if let`, the Err-only handling can be written inside the {} block. <This is handy!
if let Err(err) = read_and_write_csv_model() {
println!("{}", err);
process::exit(1);
}
}
fn run_match() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
match result {
// Write the Err arm first and return early.
Err(e) => return Err(From::from(e)),
Ok(r) => println!("{:?}", r),
}
}
Ok(())
}
fn run_question() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
// Using `?` improves readability!
let a = result?;
println!("{:?}", a);
}
Ok(())
}
fn read_csv_file() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let file = File::open(file_path)?;
let mut rdr = csv::Reader::from_reader(file);
// Suppose we want to read the headers here.
// Option 1: clone() them.
// The downside is the cost of an extra copy in memory.
// let headers = rdr.headers()?.clone();
{
// Because of the lifetime, this call is nested in its own scope.
// Option 2: nest the scope.
// Otherwise the borrow would block iterating over the records afterwards.
// <I see. So this nesting is a technique you can use deliberately.
let headers = rdr.headers()?;
println!("{:?}", headers);
}
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn read_csv_file2() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn get_file_path() -> Result<OsString, Box<dyn Error>> {
match env::args_os().nth(1) {
None => Err(From::from("expected 1 argument, but got none")),
Some(file_path) => Ok(file_path),
}
}
fn read_csv_file3() {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b';')
.double_quote(false)
.flexible(true)
.comment(Some(b'#'))
.from_reader(io::stdin());
// Lots of settings can be configured. <Flexible
}
type Record = (String, String, Option<u64>, f64, f64);
fn read_csv_file4() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record3 = result?;
println!("{:?}", record);
}
Ok(())
}
type Record2 = HashMap<String, String>;
fn read_csv_file5() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record2 = result?;
println!("{:?}", record);
}
Ok(())
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Record3 {
latitude: f64,
longitude: f64,
// This option automatically turns a failed parse into None
#[serde(deserialize_with = "csv::invalid_option")]
population: Option<f64>,
city: String,
state: String,
}
fn write_csv() -> Result<(), Box<dyn Error>> {
// let mut wtr = csv::Writer::from_writer(io::stdout());
let mut wtr = csv::WriterBuilder::new()
.delimiter(b'\t')
.quote_style(csv::QuoteStyle::NonNumeric)
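// QuoteStyle::NonNumeric quotes every field that does not parse as a number.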
.from_writer(io::stdout());
// The AsRef<[u8]> bound is useful because types such as String, &str, and Vec<u8> all satisfy it.
wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?;
wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?;
wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?;
wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?;
wtr.flush()?;
Ok(())
}
// Replacing the borrowed &str fields with owned Strings would mean
// allocating a new String for both city and state every time a record is written.
// Writing still works that way, but it wastes a little memory and performance.
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
struct WriteRecord<'a> {
city: &'a str,
state: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
fn write_csv2() -> Result<(), Box<dyn Error>> {
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.serialize(WriteRecord {
city: "Davidsons Landing",
state: "AK",
population: None,
latitude: 65.2419444,
longitude: -165.2716667,
})?;
wtr.serialize(WriteRecord {
city: "Kenai",
state: "AK",
population: Some(7610),
latitude: 60.5544444,
longitude: -151.2583333,
})?;
wtr.serialize(WriteRecord {
city: "Oakman",
state: "AL",
population: None,
latitude: 33.7133333,
longitude: -87.3886111,
})?;
wtr.flush()?;
Ok(())
}
fn read_and_write_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args_os().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build the CSV reader (stdin) and CSV writer (stdout)
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.headers()?)?;
for result in rdr.records() {
let record = result?;
if record.iter().any(|r| r == &argss) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// How to handle data that cannot be converted to UTF-8:
// read it as raw bytes!
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build the CSV reader (stdin) and CSV writer (stdout)
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.byte_headers()?)?;
for result in rdr.byte_records() {
let record = result?;
// argss.as_bytes() returns a reference.
if record.iter().any(|r| r == argss.as_bytes()) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// Unlike the previous example, derive both Deserialize and Serialize,
// so the type can be deserialized and serialized automatically.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
struct RecordMulti {
city: String,
state: String,
population: Option<u64>,
latitude: f64,
}
fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> {
// Take the query as a single positional argument.
// Return an error if the argument is missing or is not an integer.
let minimum_pop: u64 = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(arg) => arg.parse::<u64>()?,
};
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
for result in rdr.deserialize() {
let record: RecordMulti = result?;
if record.population.map_or(false, |f| f >= minimum_pop) {
wtr.serialize(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// ./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total
fn performance_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.records() {
let record = result?;
if &record[0] == "us" && &record[3] == "MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34% | ry only once; it is overwritten on every read, which makes this faster.
let mut record = csv::ByteRecord::new();
let mut count = 0;
while reader.read_byte_record(&mut record)? {
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct RecordPerformance {
country: String,
city: String,
accent_city: String,
region: String,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
// ./csv_example < worldcitiespop.csv 3.66s user 0.11s system 85% cpu 4.396 total
fn performance_read_csv_to_model() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.deserialize() {
let record: RecordPerformance = result?;
if &record.country == "us" && &record.region == "MA" {
count += 1;
}
}
Ok(count)
}
// Add a lifetime and switch the fields to borrowed &str.
//tutorial-perf-serde-02.rs
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct RecordPerfomanceUp<'a> {
city: &'a str,
country: &'a str,
accent_city: &'a str,
region: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
// ./csv_example < worldcitiespop.csv 1.14s user 0.04s system 97% cpu 1.216 total
fn performance_up_read_csv_to_model() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut raw_record = csv::StringRecord::new();
let headers = reader.headers()?.clone();
let mut count = 0;
// while reader.read_record(&mut raw_record)? {
// let record: RecordPerfomanceUp = raw_record.deserialize(Some(&headers))?;
// if record.country == "us" && record.region == "MA" {
// count += 1;
// }
// }
for result in reader.deserialize() {
let record: RecordPerformance = result?;
if record.country == "us" && record.region == "MA" {
count += 1;
}
}
Ok(count)
}
| cpu 5.094 total
// Changed the processing from String to raw bytes.
fn performance2_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.byte_records() {
let record = result?;
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total
// reader.records() keeps handing out new records from the iterator (allocating each time),
// so allocate only once to cut down the number of allocations.
fn performance3_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
// Allocate memo | identifier_body
main.rs | extern crate csv;
extern crate serde;
// This lets us write `#[derive(Deserialize)]`.
#[macro_use]
extern crate serde_derive;
use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io;
use std::{error::Error, ffi::OsString, process};
// fn main_not_recover() {
// println!("Hello, world!");
// let mut rds = csv::Reader::from_reader(io::stdin());
// for result in rds.records() {
// // expect panics when the result is an Error, so this is bad practice.
// let record = result.expect("a csv record");
// println!("{:?}", record);
// }
// }
fn main() {
println!("Hello, world!");
match performance_up_read_csv_to_model() {
Ok(count) => println!("{:?}", count),
Err(err) => {
println!("{}", err);
process::exit(1);
}
}
}
// Practicing error handling
fn main_recorver() {
println!("Hellssdfgsdf");
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
match result {
Ok(r) => println!("{:?}", r),
// Handling the Err arm this way makes the error recoverable.
Err(e) => println!("{:?}", e),
}
}
}
// read and write csv test
fn main_csv() {
println!("Hello, world!");
// With `if let`, the Err-only handling can be written inside the {} block. <This is handy!
if let Err(err) = read_and_write_csv_model() {
println!("{}", err);
process::exit(1);
}
}
fn run_match() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
match result {
// Write the Err arm first and return early.
Err(e) => return Err(From::from(e)),
Ok(r) => println!("{:?}", r),
}
}
Ok(())
}
fn run_question() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
// Using `?` improves readability!
let a = result?;
println!("{:?}", a);
}
Ok(())
}
fn read_csv_file() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let file = File::open(file_path)?;
let mut rdr = csv::Reader::from_reader(file);
// Suppose we want to read the headers here.
// Option 1: clone() them.
// The downside is the cost of an extra copy in memory.
// let headers = rdr.headers()?.clone();
{
// Because of the lifetime, this call is nested in its own scope.
// Option 2: nest the scope.
// Otherwise the borrow would block iterating over the records afterwards.
// <I see. So this nesting is a technique you can use deliberately.
let headers = rdr.headers()?;
println!("{:?}", headers);
}
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn read_csv_file2() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn get_file_path() -> Result<OsString, Box<dyn Error>> {
match env::args_os().nth(1) {
None => Err(From::from("expected 1 argument, but got none")),
Some(file_path) => Ok(file_path),
}
}
fn read_csv_file3() {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b';')
.double_quote(false)
.flexible(true)
.comment(Some(b'#'))
.from_reader(io::stdin());
// Lots of settings can be configured. <Flexible
}
type Record = (String, String, Option<u64>, f64, f64);
fn read_csv_file4() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record3 = result?;
println!("{:?}", record);
}
Ok(())
}
type Record2 = HashMap<String, String>;
fn read_csv_file5() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record2 = result?;
println!("{:?}", record);
}
Ok(())
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Record3 {
latitude: f64,
longitude: f64,
// This option automatically turns a failed parse into None
#[serde(deserialize_with = "csv::invalid_option")]
population: Option<f64>,
city: String,
state: String,
}
fn write_csv() -> Result<(), Box<dyn Error>> {
// let mut wtr = csv::Writer::from_writer(io::stdout());
let mut wtr = csv::WriterBuilder::new()
.delimiter(b'\t')
.quote_style(csv::QuoteStyle::NonNumeric)
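// QuoteStyle::NonNumeric quotes every field that does not parse as a number.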
.from_writer(io::stdout());
// The AsRef<[u8]> bound is useful because types such as String, &str, and Vec<u8> all satisfy it.
wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?;
wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?;
wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?;
wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?;
wtr.flush()?;
Ok(())
}
// Replacing the borrowed &str fields with owned Strings would mean
// allocating a new String for both city and state every time a record is written.
// Writing still works that way, but it wastes a little memory and performance.
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
struct WriteRecord<'a> {
city: &'a str,
state: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
fn write_csv2() -> Result<(), Box<dyn Error>> {
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.serialize(WriteRecord {
city: "Davidsons Landing",
state: "AK",
population: None,
latitude: 65.2419444,
longitude: -165.2716667,
})?;
wtr.serialize(WriteRecord {
city: "Kenai",
state: "AK",
population: Some(7610),
latitude: 60.5544444,
longitude: -151.2583333,
})?;
wtr.serialize(WriteRecord {
city: "Oakman",
state: "AL",
population: None,
latitude: 33.7133333,
longitude: -87.3886111,
})?;
wtr.flush()?;
Ok(())
}
fn read_and_write_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args_os().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build the CSV reader (stdin) and CSV writer (stdout)
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.headers()?)?;
for result in rdr.records() {
let record = result?;
if record.iter().any(|r| r == &argss) {
wtr.write_record(&record)?;
} | }
// How to handle data that cannot be converted to UTF-8:
// read it as raw bytes!
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build the CSV reader (stdin) and CSV writer (stdout)
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.byte_headers()?)?;
for result in rdr.byte_records() {
let record = result?;
// argss.as_bytes() returns a reference.
if record.iter().any(|r| r == argss.as_bytes()) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// Unlike the previous example, derive both Deserialize and Serialize,
// so the type can be deserialized and serialized automatically.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
struct RecordMulti {
city: String,
state: String,
population: Option<u64>,
latitude: f64,
}
fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> {
// Take the query as a single positional argument.
// Return an error if the argument is missing or is not an integer.
let minimum_pop: u64 = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(arg) => arg.parse::<u64>()?,
};
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
for result in rdr.deserialize() {
let record: RecordMulti = result?;
if record.population.map_or(false, |f| f >= minimum_pop) {
wtr.serialize(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// ./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total
fn performance_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.records() {
let record = result?;
if &record[0] == "us" && &record[3] == "MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34% cpu 5.094 total
// Changed the processing from String to raw bytes.
fn performance2_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.byte_records() {
let record = result?;
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total
// reader.records() keeps handing out new records from the iterator (allocating each time),
// so allocate only once to cut down the number of allocations.
fn performance3_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
// Allocate memory only once; it is overwritten on every read, which makes this faster.
let mut record = csv::ByteRecord::new();
let mut count = 0;
while reader.read_byte_record(&mut record)? {
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct RecordPerformance {
country: String,
city: String,
accent_city: String,
region: String,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
// ./csv_example < worldcitiespop.csv 3.66s user 0.11s system 85% cpu 4.396 total
fn performance_read_csv_to_model() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.deserialize() {
let record: RecordPerformance = result?;
if &record.country == "us" && &record.region == "MA" {
count += 1;
}
}
Ok(count)
}
// Add a lifetime and switch the fields to borrowed &str.
//tutorial-perf-serde-02.rs
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct RecordPerfomanceUp<'a> {
city: &'a str,
country: &'a str,
accent_city: &'a str,
region: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
// ./csv_example < worldcitiespop.csv 1.14s user 0.04s system 97% cpu 1.216 total
fn performance_up_read_csv_to_model() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut raw_record = csv::StringRecord::new();
let headers = reader.headers()?.clone();
let mut count = 0;
// while reader.read_record(&mut raw_record)? {
// let record: RecordPerfomanceUp = raw_record.deserialize(Some(&headers))?;
// if record.country == "us" && record.region == "MA" {
// count += 1;
// }
// }
for result in reader.deserialize() {
let record: RecordPerformance = result?;
if record.country == "us" && record.region == "MA" {
count += 1;
}
}
Ok(count)
} | }
wtr.flush()?;
Ok(()) | random_line_split |
main.rs | extern crate csv;
extern crate serde;
// This lets us write `#[derive(Deserialize)]`.
#[macro_use]
extern crate serde_derive;
use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io;
use std::{error::Error, ffi::OsString, process};
// fn main_not_recover() {
// println!("Hello, world!");
// let mut rds = csv::Reader::from_reader(io::stdin());
// for result in rds.records() {
// // expect panics when the result is an Error, so this is bad practice.
// let record = result.expect("a csv record");
// println!("{:?}", record);
// }
// }
fn main() {
println!("Hello, world!");
match performance_up_read_csv_to_model() {
Ok(count) => println!("{:?}", count),
Err(err) => {
println!("{}", err);
process::exit(1);
}
}
}
// Practicing error handling
fn main_recorver() {
println!("Hellssdfgsdf");
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
match result {
Ok(r) => println!("{:?}", r),
// Handling the Err arm this way makes the error recoverable.
Err(e) => println!("{:?}", e),
}
}
}
// read and write csv test
fn main_csv() {
println!("Hello, world!");
// With `if let`, the Err-only handling can be written inside the {} block. <This is handy!
if let Err(err) = read_and_write_csv_model() {
println!("{}", err);
process::exit(1);
}
}
fn run_match() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
match result {
// Write the Err arm first and return early.
Err(e) => return Err(From::from(e)),
Ok(r) => println!("{:?}", r),
}
}
Ok(())
}
fn run_question() -> Result<(), Box<dyn Error>> {
let mut rds = csv::Reader::from_reader(io::stdin());
for result in rds.records() {
// Using `?` improves readability!
let a = result?;
println!("{:?}", a);
}
Ok(())
}
fn read_csv_file() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let file = File::open(file_path)?;
let mut rdr = csv::Reader::from_reader(file);
// Suppose we want to read the headers here.
// Option 1: clone() them.
// The downside is the cost of an extra copy in memory.
// let headers = rdr.headers()?.clone();
{
// Because of the lifetime, this call is nested in its own scope.
// Option 2: nest the scope.
// Otherwise the borrow would block iterating over the records afterwards.
// <I see. So this nesting is a technique you can use deliberately.
let headers = rdr.headers()?;
println!("{:?}", headers);
}
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn read_csv_file2() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.records() {
let record = result?;
println!("{:?}", record);
}
Ok(())
}
fn get_file_path() -> Result<OsString, Box<dyn Error>> {
match env::args_os().nth(1) {
None => Err(From::from("expected 1 argument, but got none")),
Some(file_path) => Ok(file_path),
}
}
fn read_csv_file3() {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.delimiter(b';')
.double_quote(false)
.flexible(true)
.comment(Some(b'#'))
.from_reader(io::stdin());
// Lots of settings can be configured. <Flexible
}
type Record = (String, String, Option<u64>, f64, f64);
fn read_csv_file4() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record3 = result?;
println!("{:?}", record);
}
Ok(())
}
type Record2 = HashMap<String, String>;
fn read_csv_file5() -> Result<(), Box<dyn Error>> {
let file_path = get_file_path()?;
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_path(file_path)?;
for result in rdr.deserialize() {
let record: Record2 = result?;
println!("{:?}", record);
}
Ok(())
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Record3 {
latitude: f64,
longitude: f64,
// This option automatically turns a failed parse into None
#[serde(deserialize_with = "csv::invalid_option")]
population: Option<f64>,
city: String,
state: String,
}
fn write_csv() -> Result<(), Box<dyn Error>> {
// let mut wtr = csv::Writer::from_writer(io::stdout());
let mut wtr = csv::WriterBuilder::new()
.delimiter(b'\t')
.quote_style(csv::QuoteStyle::NonNumeric)
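// QuoteStyle::NonNumeric quotes every field that does not parse as a number.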
.from_writer(io::stdout());
// The AsRef<[u8]> bound is useful because types such as String, &str, and Vec<u8> all satisfy it.
wtr.write_record(&["City", "State", "Population", "Latitude", "Longitude"])?;
wtr.write_record(&["Davidsons Landing", "AK", "", "65.2419444", "-165.2716667"])?;
wtr.write_record(&["Kenai", "AK", "7610", "60.5544444", "-151.2583333"])?;
wtr.write_record(&["Oakman", "AL", "", "33.7133333", "-87.3886111"])?;
wtr.flush()?;
Ok(())
}
// Replacing the borrowed &str fields with owned Strings would mean
// allocating a new String for both city and state every time a record is written.
// Writing still works that way, but it wastes a little memory and performance.
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
struct WriteRecord<'a> {
city: &'a str,
state: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
fn write_csv2() -> Result<(), Box<dyn Error>> {
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.serialize(WriteRecord {
city: "Davidsons Landing",
state: "AK",
population: None,
latitude: 65.2419444,
longitude: -165.2716667,
})?;
wtr.serialize(WriteRecord {
city: "Kenai",
state: "AK",
population: Some(7610),
latitude: 60.5544444,
longitude: -151.2583333,
})?;
wtr.serialize(WriteRecord {
city: "Oakman",
state: "AL",
population: None,
latitude: 33.7133333,
longitude: -87.3886111,
})?;
wtr.flush()?;
Ok(())
}
fn read_and_write_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args_os().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build the CSV reader (stdin) and CSV writer (stdout)
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.headers()?)?;
for result in rdr.records() {
let record = result?;
if record.iter().any(|r| r == &argss) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// How to handle data that cannot be converted to UTF-8:
// read it as raw bytes!
fn read_and_write_byte_csv() -> Result<(), Box<dyn Error>> {
let argss = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(argument) => argument,
};
// Build the CSV reader (stdin) and CSV writer (stdout)
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
wtr.write_record(rdr.byte_headers()?)?;
for result in rdr.byte_records() {
let record = result?;
// argss.as_bytes() returns a reference.
if record.iter().any(|r| r == argss.as_bytes()) {
wtr.write_record(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// Unlike the previous example, derive both Deserialize and Serialize,
// so the type can be deserialized and serialized automatically.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "PascalCase")]
struct RecordMulti {
city: String,
state: String,
population: Option<u64>,
latitude: f64,
}
fn read_and_write_csv_model() -> Result<(), Box<dyn Error>> {
// Take the query as a single positional argument.
// Return an error if the argument is missing or is not an integer.
let minimum_pop: u64 = match env::args().nth(1) {
None => return Err(From::from("expected 1 argument, but got none")),
Some(arg) => arg.parse::<u64>()?,
};
let mut rdr = csv::Reader::from_reader(io::stdin());
let mut wtr = csv::Writer::from_writer(io::stdout());
for result in rdr.deserialize() {
let record: RecordMulti = result?;
if record.population.map_or(false, |f| f >= minimum_pop) {
wtr.serialize(&record)?;
}
}
wtr.flush()?;
Ok(())
}
// ./csv_example < worldcitiespop.csv 2.12s user 0.09s system 70% cpu 3.125 total
fn performance_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.records() {
let record = result?;
if &rec | us" && &record[3] == "MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 1.69s user 0.05s system 34% cpu 5.094 total
// Changed the processing from String to raw bytes.
fn performance2_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.byte_records() {
let record = result?;
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
// ./csv_example < worldcitiespop.csv 0.44s user 0.04s system 22% cpu 2.142 total
// reader.records() keeps handing out new records from the iterator (allocating each time),
// so allocate only once to cut down the number of allocations.
fn performance3_read_csv() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
// Allocate memory only once; it is overwritten on every read, which makes this faster.
let mut record = csv::ByteRecord::new();
let mut count = 0;
while reader.read_byte_record(&mut record)? {
if &record[0] == b"us" && &record[3] == b"MA" {
count += 1;
}
}
Ok(count)
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct RecordPerformance {
country: String,
city: String,
accent_city: String,
region: String,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
// ./csv_example < worldcitiespop.csv 3.66s user 0.11s system 85% cpu 4.396 total
fn performance_read_csv_to_model() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut count = 0;
for result in reader.deserialize() {
let record: RecordPerformance = result?;
if &record.country == "us" && &record.region == "MA" {
count += 1;
}
}
Ok(count)
}
// Add a lifetime and switch the fields to borrowed &str.
//tutorial-perf-serde-02.rs
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct RecordPerfomanceUp<'a> {
city: &'a str,
country: &'a str,
accent_city: &'a str,
region: &'a str,
population: Option<u64>,
latitude: f64,
longitude: f64,
}
// ./csv_example < worldcitiespop.csv 1.14s user 0.04s system 97% cpu 1.216 total
fn performance_up_read_csv_to_model() -> Result<u64, Box<dyn Error>> {
let mut reader = csv::Reader::from_reader(io::stdin());
let mut raw_record = csv::StringRecord::new();
let headers = reader.headers()?.clone();
let mut count = 0;
// while reader.read_record(&mut raw_record)? {
// let record: RecordPerfomanceUp = raw_record.deserialize(Some(&headers))?;
// if record.country == "us" && record.region == "MA" {
// count += 1;
// }
// }
for result in reader.deserialize() {
let record: RecordPerformance = result?;
if record.country == "us" && record.region == "MA" {
count += 1;
}
}
Ok(count)
}
| ord[0] == " | identifier_name |
__init__.py | import logging
import sys
import pprint
import os
import requests
import collections
import json
from datetime import *
from dateutil.relativedelta import *
from enum import Enum
import string
import random
from requests.auth import HTTPDigestAuth
import urllib.request
import kubernetes
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body, which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logger = logging.getLogger(__name__)
class ApiVersion(Enum):
A1 = '/api/atlas/v1.0'
A2 = '/api/atlas/v2'
CM1 = '/api/public/v1.0'
class AtlasEnvironment(Enum):
PRODUCTION = 'https://cloud.mongodb.com'
STAGING = 'https://cloud-qa.mongodb.com'
class Env(object):
PUBLIC_KEY = "MCLI_PUBLIC_API_KEY"
PRIVATE_KEY = "MCLI_PRIVATE_API_KEY"
ORG_ID = "MCLI_ORG_ID"
PROJECT_ID = "MCLI_PROJECT_ID"
@staticmethod
def get(key):
return os.getenv(key)
class AtlasClient(object):
def __init__(self,public_key=Env.get(Env.PUBLIC_KEY)
,private_key=Env.get(Env.PRIVATE_KEY)
,org_id=Env.get(Env.ORG_ID)
,project_id=Env.get(Env.PROJECT_ID)
,base_url="https://cloud.mongodb.com"):
""" Constructor - pass in username/apikey or public/private key pair for
MongoDB Atlas. Override `base_url` for use with an instance of
MongoDB Ops Manager.
"""
self.public_key = public_key
self.private_key = private_key
self.org_id = org_id
self.project_id = project_id
self.verbose = False  # invoice helpers below check self.verbose, but it was never initialized
if isinstance(base_url,AtlasEnvironment):
self.base_url = base_url.value
else:
self.base_url = base_url
self.api_root = '{}{}'.format(base_url,ApiVersion.A1.value)
def pyatlas_config(self,verbose=True):
config = {
Env.PUBLIC_KEY : self.public_key,
Env.PRIVATE_KEY : self.private_key,
Env.ORG_ID : self.org_id,
Env.PROJECT_ID : self.project_id
}
print(f'pyatlas_config={config}')
return config
def get_digest(self):
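# Atlas programmatic API keys authenticate via HTTP Digest: the public key is the username and the private key is the password.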
return HTTPDigestAuth(self.public_key,self.private_key)
# API
def organizations(self):
""" Return a list of organzations available to current api user.
"""
return self.get('{}/orgs'.format(ApiVersion.A1.value))
def projects(self):
""" Alias for groups()
"""
return self.groups()
def groups(self):
""" Return list of groups for this public key.
"""
#return self.get('{}/groups'.format(ApiVersion.A1.value))
return self.get('{}/groups'.format(ApiVersion.CM1.value))
def users(self,org_id=None):
""" Return list of users for this organization.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))
## Database Users ##
def database_users(self,project_id=''):
""" GET /api/atlas/v1.0/groups/{GROUP-ID}/databaseUsers
"""
project_id = project_id if project_id != '' else self.project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
| data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
project_id = project_id if project_id != '' else self.project_id
logger.info(f'create_database_user {db_user} {project_id}')
res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programmatic API key.
1. Need programmatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user x with prog api key
4. get cluster info
5. assemble connection string and return
"""
project_id = project_id if project_id != '' else self.project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers=headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
cs = cluster['mongoURIWithOptions'].split('/',1)
#conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'
return conn_str
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
project_id = project_id if project_id != '' else self.project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return True if and only if cluster stateName is `IDLE`.
"""
cluster = self.get_cluster(cluster_name, project_id)
pprint.pprint(cluster)
return cluster['stateName'] == 'IDLE'
def create_cluster(self, provision_details, project_id=""):
""" Create a cluster.
The provision_details should be instanace of the
AtlasCluster CRD
"""
response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'
,body=provision_details)
return response
def delete_cluster(self, cluster_name, project_id ="" ):
""" Delete the cluster.
"""
response = self.delete(f'{ApiVersion.A1.value}/groups/{project_id}/clusters/{cluster_name}')
return response
## Fidicuary APIs ##
def invoices(self,org_id=None,invoice_id=''):
""" Return all invoices or a particular invoice.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))
def pending_invoice(self,org_id=None):
""" Return the pending invoice for this organization id.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/pending'.format(ApiVersion.A1.value,org_id))
def invoice_items(self,org_id=None,query={}):
""" Return the line items posted for the
given _date from the appropriate invoice.
"""
if org_id is None:
org_id = self.org_id
query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')
# Given a 'query_end_date' to find the invoice containing the
# line items for that date we need to find the invoice which
# has 'endDate' equal to the end of the month of the `query_end_date`
query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))
target_invoices = []
invoices = self.invoices(org_id)
if self.verbose:
print('Searching invoices org_id={}'.format(org_id))
print('query={} query_end_date={}'.format(query,query_end_date))
print('Result keys: {}'.format( invoices['content'].keys() ))
print('Total result count: {}'.format( invoices['content']['totalCount'] ))
for invoice in invoices['content']['results']:
#pprint.pprint(invoice)
end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if self.verbose:
print('invoice({})[\'endDate\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))
if end_date == query_first_next_month:
target_invoices.append(invoice)
if self.verbose:
print('Target invoices: {}'.format(target_invoices))
target_line_items = []
for invoice in target_invoices:
invoice_details = self.invoices(org_id,invoice['id'])
print('invoice_details: {}'.format(invoice_details))
for item in invoice_details['content']['lineItems']:
end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if end_date == query_end_date:
target_line_items.append(item)
if self.verbose:
print('target_line_items: {}'.format(target_line_items))
return target_line_items
def summarize_invoice(line_items):
""" Return the sum total for a given list of invoice items.
"""
sku_summary = AtlasClient.summarize_invoice_items_by_sku(line_items)
total = 0
for item in sku_summary:
total += sku_summary[item]['totalPriceCents']
return total
def summarize_invoice_items_by_sku(line_items):
""" Return a dict summary of line items by SKU.
"""
sku_summary = {}
for item in line_items:
if item['sku'] not in sku_summary:
sku_summary[item['sku']]= { 'totalPriceCents' : 0 }
c = sku_summary[ item['sku'] ]['totalPriceCents'] + item['totalPriceCents']
si = { 'totalPriceCents' : c,
'sku' : item['sku'],
'endDate' : item['endDate'] }
sku_summary[ item['sku'] ] = si
return sku_summary
def project_by_name(self,project_name=''):
""" Return MongoDB Atlas Project/Group metadata for given project name.
"""
logger.debug(f'project_by_name project_name={project_name}')
return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))
def envelope(self,parameters={}):
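# With envelope=true the API wraps each response body under a 'content' key (plus HTTP status info), which is why callers above read response['content'].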
parameters['envelope'] = True
return parameters
###
### raw HTTP methods
###
def get(self, path, parameters={}):
parameters = self.envelope(parameters)
url = '{}{}'.format(self.base_url,path)
logger.info('AtlasClient get:url={}'.format(url))
response= requests.get(url,params=parameters,auth=self.get_digest())
logger.info(f' response.json()={response.json()}')
response.raise_for_status()
return response.json()
def post(self, path, parameters={}, body={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
pprint.pprint(body)
response = requests.post(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,data=json.dumps(body)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
def delete(self, path, parameters={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
response = requests.delete(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json() | def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip | random_line_split |
__init__.py | import logging
import sys
import pprint
import os
import requests
import collections
import json
from datetime import *
from dateutil.relativedelta import *
from enum import Enum
import string
import random
from requests.auth import HTTPDigestAuth
import urllib.request
import kubernetes
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body, which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logger = logging.getLogger(__name__)
class ApiVersion(Enum):
A1 = '/api/atlas/v1.0'
A2 = '/api/atlas/v2'
CM1 = '/api/public/v1.0'
class AtlasEnvironment(Enum):
PRODUCTION = 'https://cloud.mongodb.com'
STAGING = 'https://cloud-qa.mongodb.com'
class Env(object):
PUBLIC_KEY = "MCLI_PUBLIC_API_KEY"
PRIVATE_KEY = "MCLI_PRIVATE_API_KEY"
ORG_ID = "MCLI_ORG_ID"
PROJECT_ID = "MCLI_PROJECT_ID"
@staticmethod
def get(key):
return os.getenv(key)
class AtlasClient(object):
def __init__(self,public_key=Env.get(Env.PUBLIC_KEY)
,private_key=Env.get(Env.PRIVATE_KEY)
,org_id=Env.get(Env.ORG_ID)
,project_id=Env.get(Env.PROJECT_ID)
,base_url="https://cloud.mongodb.com"):
""" Constructor - pass in username/apikey or public/private key pair for
MongoDB Atlas. Override `base_url` for use with an instance of
MongoDB Ops Manager.
"""
self.public_key = public_key
self.private_key = private_key
self.org_id = org_id
self.project_id = project_id
self.verbose = False  # invoice helpers below check self.verbose, but it was never initialized
if isinstance(base_url,AtlasEnvironment):
self.base_url = base_url.value
else:
self.base_url = base_url
self.api_root = '{}{}'.format(base_url,ApiVersion.A1.value)
def pyatlas_config(self,verbose=True):
|
def get_digest(self):
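# Atlas programmatic API keys authenticate via HTTP Digest: the public key is the username and the private key is the password.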
return HTTPDigestAuth(self.public_key,self.private_key)
# API
def organizations(self):
""" Return a list of organzations available to current api user.
"""
return self.get('{}/orgs'.format(ApiVersion.A1.value))
def projects(self):
""" Alias for groups()
"""
return self.groups()
def groups(self):
""" Return list of groups for this public key.
"""
#return self.get('{}/groups'.format(ApiVersion.A1.value))
return self.get('{}/groups'.format(ApiVersion.CM1.value))
def users(self,org_id=None):
""" Return list of users for this organization.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))
## Database Users ##
def database_users(self,project_id=''):
""" GET /api/atlas/v1.0/groups/{GROUP-ID}/databaseUsers
"""
project_id = project_id if project_id != '' else self.project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
project_id = project_id if project_id != '' else self.project_id
logger.info(f'create_database_user {db_user} {project_id}')
res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programmatic API key.
1. Need programmatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user x with prog api key
4. get cluster info
5. assemble connection string and return
"""
project_id = project_id if project_id != '' else self.project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers=headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
cs = cluster['mongoURIWithOptions'].split('/',1)
#conn_str = f'{cs[0]//{key['publicKey']}:{key['privateKey']}@{cs[1]}'
return conn_str
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
project_id = project_id if project_id != '' else self.project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return True if and only if cluster stateName is `IDLE`.
"""
cluster = self.get_cluster(cluster_name, project_id)
pprint.pprint(cluster)
return cluster['stateName'] == 'IDLE'
def create_cluster(self, provision_details, project_id=""):
""" Create a cluster.
The provision_details should be instanace of the
AtlasCluster CRD
"""
response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'
,body=provision_details)
return response
def delete_cluster(self, cluster_name, project_id ="" ):
""" Delete the cluster.
"""
response = self.delete(f'{ApiVersion.A1.value}/groups/{project_id}/clusters/{cluster_name}')
return response
## Fidicuary APIs ##
def invoices(self,org_id=None,invoice_id=''):
""" Return all invoices or a particular invoice.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))
def pending_invoice(self,org_id=None):
""" Return the pending invoice for this organization id.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/pending'.format(ApiVersion.A1.value,org_id))
def invoice_items(self,org_id=None,query={}):
""" Return the line items posted for the
given _date from the appropriate invoice.
"""
if org_id is None:
org_id = self.org_id
query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')
# Given a 'query_end_date' to find the invoice containing the
# line items for that date we need to find the invoice which
# has 'endDate' equal to the end of the month of the `query_end_date`
query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))
target_invoices = []
invoices = self.invoices(org_id)
if self.verbose:
print('Searching invoices org_id={}'.format(org_id))
print('query={} query_end_date={}'.format(query,query_end_date))
print('Result keys: {}'.format( invoices['content'].keys() ))
print('Total result count: {}'.format( invoices['content']['totalCount'] ))
for invoice in invoices['content']['results']:
#pprint.pprint(invoice)
end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if self.verbose:
print('invoice({})[\'endDate\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))
if end_date == query_first_next_month:
target_invoices.append(invoice)
if self.verbose:
print('Target invoices: {}'.format(target_invoices))
target_line_items = []
for invoice in target_invoices:
invoice_details = self.invoices(org_id,invoice['id'])
print('invoice_details: {}'.format(invoice_details))
for item in invoice_details['content']['lineItems']:
end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if end_date == query_end_date:
target_line_items.append(item)
if self.verbose:
print('target_line_items: {}'.format(target_line_items))
return target_line_items
def summarize_invoice(line_items):
""" Return the sum total for a given list of invoice items.
"""
sku_summary = AtlasClient.summarize_invoice_items_by_sku(line_items)
total = 0
for item in sku_summary:
total += sku_summary[item]['totalPriceCents']
return total
def summarize_invoice_items_by_sku(line_items):
""" Return a dict summary of line items by SKU.
"""
sku_summary = {}
for item in line_items:
if item['sku'] not in sku_summary:
sku_summary[item['sku']]= { 'totalPriceCents' : 0 }
c = sku_summary[ item['sku'] ]['totalPriceCents'] + item['totalPriceCents']
si = { 'totalPriceCents' : c,
'sku' : item['sku'],
'endDate' : item['endDate'] }
sku_summary[ item['sku'] ] = si
return sku_summary
def project_by_name(self,project_name=''):
""" Return MongoDB Atlas Project/Group metadata for given project name.
"""
logger.debug(f'project_by_name project_name={project_name}')
return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))
def envelope(self,parameters={}):
parameters['envelope'] = True
return parameters
###
### raw HTTP methods
###
def get(self, path, parameters={}):
parameters = self.envelope(parameters)
url = '{}{}'.format(self.base_url,path)
logger.info('AtlasClient get:url={}'.format(url))
response= requests.get(url,params=parameters,auth=self.get_digest())
logger.info(f' response.json()={response.json()}')
response.raise_for_status()
return response.json()
def post(self, path, parameters={}, body={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
pprint.pprint(body)
response = requests.post(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,data=json.dumps(body)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
def delete(self, path, parameters={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
response = requests.delete(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
| config = {
Env.PUBLIC_KEY : self.public_key,
Env.PRIVATE_KEY : self.private_key,
Env.ORG_ID : self.org_id,
Env.PROJECT_ID : self.project_id
}
print(f'pyatlas_config={config}')
return config | identifier_body |
__init__.py | import logging
import pprint
import os
import sys
import requests
import collections
import json
from datetime import *
from dateutil.relativedelta import *
from enum import Enum
import string
import random
from requests.auth import HTTPDigestAuth
import urllib.request
import kubernetes
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body, which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logger = logging.getLogger(__name__)
class ApiVersion(Enum):
A1 = '/api/atlas/v1.0'
A2 = '/api/atlas/v2'
CM1 = '/api/public/v1.0'
class AtlasEnvironment(Enum):
PRODUCTION = 'https://cloud.mongodb.com'
STAGING = 'https://cloud-qa.mongodb.com'
class Env(object):
PUBLIC_KEY = "MCLI_PUBLIC_API_KEY"
PRIVATE_KEY = "MCLI_PRIVATE_API_KEY"
ORG_ID = "MCLI_ORG_ID"
PROJECT_ID = "MCLI_PROJECT_ID"
@staticmethod
def get(key):
return os.getenv(key)
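# Example environment (hypothetical values) read by Env and by the AtlasClient
# constructor defaults:
#   export MCLI_PUBLIC_API_KEY=abcdefgh
#   export MCLI_PRIVATE_API_KEY=01234567-89ab-cdef-0123-456789abcdef
#   export MCLI_ORG_ID=5f1a2b3c4d5e6f7a8b9c0d1e
#   export MCLI_PROJECT_ID=5f1a2b3c4d5e6f7a8b9c0d1f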
class AtlasClient(object):
def __init__(self,public_key=Env.get(Env.PUBLIC_KEY)
,private_key=Env.get(Env.PRIVATE_KEY)
,org_id=Env.get(Env.ORG_ID)
,project_id=Env.get(Env.PROJECT_ID)
,base_url="https://cloud.mongodb.com"):
""" Constructor - pass in username/apikey or public/private key pair for
MongoDB Atlas. Override `base_url` for use with an instance of
MongoDB Ops Manager.
"""
self.public_key = public_key
self.private_key = private_key
self.org_id = org_id
self.project_id = project_id
if isinstance(base_url,AtlasEnvironment):
self.base_url = base_url.value
else:
self.base_url = base_url
self.api_root = '{}{}'.format(base_url,ApiVersion.A1.value)
def pyatlas_config(self,verbose=True):
config = {
Env.PUBLIC_KEY : self.public_key,
Env.PRIVATE_KEY : self.private_key,
Env.ORG_ID : self.org_id,
Env.PROJECT_ID : self.project_id
}
print(f'pyatlas_config={config}')
return config
def get_digest(self):
return HTTPDigestAuth(self.public_key,self.private_key)
# API
def organizations(self):
""" Return a list of organzations available to current api user.
"""
return self.get('{}/orgs'.format(ApiVersion.A1.value))
def projects(self):
""" Alias for groups()
"""
return self.groups()
def groups(self):
""" Return list of groups for this public key.
"""
#return self.get('{}/groups'.format(ApiVersion.A1.value))
return self.get('{}/groups'.format(ApiVersion.CM1.value))
def users(self,org_id=None):
""" Return list of users for this organization.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))
## Database Users ##
def database_users(self,project_id=''):
""" GET /api/atlas/v1.0/groups/{GROUP-ID}/databaseUsers
"""
        project_id = project_id if project_id != '' else self.project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
            programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
            programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
        project_id = project_id if project_id != '' else self.project_id
logger.info(f'create_database_user {db_user} {project_id}')
        res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
        return res
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programat
x1. Need programatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user x with prog api key
4. get cluster info
5. assemble connection string and return
"""
        project_id = project_id if project_id != '' else self.project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
        cs = cluster['mongoURIWithOptions'].split('//',1)
        # Assemble the connection string from the db user credentials created above.
        conn_str = f"{cs[0]}//{db_user['username']}:{db_user['password']}@{cs[1]}"
        return conn_str
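    # A minimal usage sketch (hypothetical cluster name and project id), assuming the
    # MCLI_* environment variables are set so the constructor defaults apply:
    #   client = AtlasClient()
    #   conn_str = client.bind('mycluster0', project_id='5f1a2b3c4d5e6f7a8b9c0d1e')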
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
        project_id = project_id if project_id != '' else self.project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return True if and only if cluster stateName is `IDLE`.
"""
        cluster = self.get_cluster(cluster_name, project_id)
pprint.pprint(cluster)
return cluster['stateName'] == 'IDLE'
def create_cluster(self, provision_details, project_id=""):
""" Create a cluster.
            The provision_details should be an instance of the
AtlasCluster CRD
"""
response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'
,body=provision_details)
return response
def delete_cluster(self, cluster_name, project_id ="" ):
""" Delete the cluster.
"""
response = self.delete(f'{ApiVersion.A1.value}/groups/{project_id}/clusters/{cluster_name}')
return response
    ## Fiduciary APIs ##
def invoices(self,org_id=None,invoice_id=''):
""" Return all invoices or a particular invoice.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))
def | (self,org_id=None):
""" Return the pending invoice for this organization id.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/pending'.format(ApiVersion.A1.value,org_id))
def invoice_items(self,org_id=None,query={}):
""" Return the line items posted for the
given _date from the appropriate invoice.
"""
if org_id is None:
org_id = self.org_id
query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')
# Given a 'query_end_date' to find the invoice containing the
# line items for that date we need to find the invoice which
# has 'endDate' equal to the end of the month of the `query_end_date`
query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))
target_invoices = []
invoices = self.invoices(org_id)
if self.verbose:
print('Searching invoices org_id={}'.format(org_id))
print('query={} query_end_date={}'.format(query,query_end_date))
print('Result keys: {}'.format( invoices['content'].keys() ))
print('Total result count: {}'.format( invoices['content']['totalCount'] ))
for invoice in invoices['content']['results']:
#pprint.pprint(invoice)
end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if self.verbose:
print('invoice({})[\'endDate\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))
if end_date == query_first_next_month:
target_invoices.append(invoice)
if self.verbose:
print('Target invoices: {}'.format(target_invoices))
target_line_items = []
for invoice in target_invoices:
invoice_details = self.invoices(org_id,invoice['id'])
print('invoice_details: {}'.format(invoice_details))
for item in invoice_details['content']['lineItems']:
end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if end_date == query_end_date:
target_line_items.append(item)
if self.verbose:
print('target_line_items: {}'.format(target_line_items))
return target_line_items
    @staticmethod
    def summarize_invoice(line_items):
""" Return the sum total for a given list of invoice items.
"""
sku_summary = AtlasClient.summarize_invoice_items_by_sku(line_items)
total = 0
for item in sku_summary:
total += sku_summary[item]['totalPriceCents']
return total
    @staticmethod
    def summarize_invoice_items_by_sku(line_items):
""" Return a dict summary of line items by SKU.
"""
sku_summary = {}
for item in line_items:
if item['sku'] not in sku_summary:
sku_summary[item['sku']]= { 'totalPriceCents' : 0 }
c = sku_summary[ item['sku'] ]['totalPriceCents'] + item['totalPriceCents']
si = { 'totalPriceCents' : c,
'sku' : item['sku'],
'endDate' : item['endDate'] }
sku_summary[ item['sku'] ] = si
return sku_summary
def project_by_name(self,project_name=''):
""" Return MongoDB Atlas Project/Group metadata for given project name.
"""
logger.debug(f'project_by_name project_name={project_name}')
return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))
def envelope(self,parameters={}):
parameters['envelope'] = True
return parameters
###
### raw HTTP methods
###
def get(self, path, parameters={}):
parameters = self.envelope(parameters)
url = '{}{}'.format(self.base_url,path)
logger.info('AtlasClient get:url={}'.format(url))
response= requests.get(url,params=parameters,auth=self.get_digest())
logger.info(f' response.json()={response.json()}')
response.raise_for_status()
return response.json()
def post(self, path, parameters={}, body={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
pprint.pprint(body)
response = requests.post(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,data=json.dumps(body)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
def delete(self, path, parameters={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
response = requests.delete(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
| pending_invoice | identifier_name |
__init__.py | import logging
import pprint
import os
import sys
import requests
import collections
import json
from datetime import *
from dateutil.relativedelta import *
from enum import Enum
import string
import random
from requests.auth import HTTPDigestAuth
import urllib.request
import kubernetes
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body, which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logger = logging.getLogger(__name__)
class ApiVersion(Enum):
A1 = '/api/atlas/v1.0'
A2 = '/api/atlas/v2'
CM1 = '/api/public/v1.0'
class AtlasEnvironment(Enum):
PRODUCTION = 'https://cloud.mongodb.com'
STAGING = 'https://cloud-qa.mongodb.com'
class Env(object):
PUBLIC_KEY = "MCLI_PUBLIC_API_KEY"
PRIVATE_KEY = "MCLI_PRIVATE_API_KEY"
ORG_ID = "MCLI_ORG_ID"
PROJECT_ID = "MCLI_PROJECT_ID"
@staticmethod
def get(key):
return os.getenv(key)
class AtlasClient(object):
def __init__(self,public_key=Env.get(Env.PUBLIC_KEY)
,private_key=Env.get(Env.PRIVATE_KEY)
,org_id=Env.get(Env.ORG_ID)
,project_id=Env.get(Env.PROJECT_ID)
,base_url="https://cloud.mongodb.com"):
""" Constructor - pass in username/apikey or public/private key pair for
MongoDB Atlas. Override `base_url` for use with an instance of
MongoDB Ops Manager.
"""
self.public_key = public_key
self.private_key = private_key
self.org_id = org_id
self.project_id = project_id
if isinstance(base_url,AtlasEnvironment):
self.base_url = base_url.value
else:
self.base_url = base_url
self.api_root = '{}{}'.format(base_url,ApiVersion.A1.value)
def pyatlas_config(self,verbose=True):
config = {
Env.PUBLIC_KEY : self.public_key,
Env.PRIVATE_KEY : self.private_key,
Env.ORG_ID : self.org_id,
Env.PROJECT_ID : self.project_id
}
print(f'pyatlas_config={config}')
return config
def get_digest(self):
return HTTPDigestAuth(self.public_key,self.private_key)
# API
def organizations(self):
""" Return a list of organzations available to current api user.
"""
return self.get('{}/orgs'.format(ApiVersion.A1.value))
def projects(self):
""" Alias for groups()
"""
return self.groups()
def groups(self):
""" Return list of groups for this public key.
"""
#return self.get('{}/groups'.format(ApiVersion.A1.value))
return self.get('{}/groups'.format(ApiVersion.CM1.value))
def users(self,org_id=None):
""" Return list of users for this organization.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))
## Database Users ##
def database_users(self,project_id=''):
""" GET /api/atlas/v1.0/groups/{GROUP-ID}/databaseUsers
"""
        project_id = project_id if project_id != '' else self.project_id
return self.get(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers')
def create_project(self
,name
,org_id=None):
if org_id is None:
org_id = self.org_id
project_data = { "name" : name }
if org_id:
project_data["orgId"] = org_id
pprint.pprint(project_data)
project = self.post(f'{ApiVersion.CM1.value}/groups'
,body=project_data)
pprint.pprint(project)
print(f'>>>>> {project.keys()}')
self.project_id = project['content']['id']
self.org_id = project['content']['orgId']
return project
def delete_project(self,name):
project = self.project_by_name(project_name=name)
group_id = project['content']['id']
logger.info(f'>>>>>>> delete_project name={name} group_id={group_id}')
response = self.delete(f'{ApiVersion.CM1.value}/groups/{group_id}')
return response
## TODO - need to expose this in the AutomaticKeyMachine api --
def new_project_apikey(self,ip_address=None,project_id=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
if project_id is None:
if self.project_id is None:
raise "No project_id found. Pass or set when creating client"
project_id=self.project_id
apikey = self.create_project_apikey(project_id=project_id)
whitelist = self.add_project_apikey_whitelist(apikey_id=apikey.id
,ip_address=ip_address
,project_id=project_id)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_project_apikey(self
,project_id
,description='pyatlas generated project apikey'
,roles='PROJECT_OWNER'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_whitelist_atlas_project(self, ip_address=None):
if ip_address is None:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
print(f'external_ip={external_ip}')
ip_address=external_ip
data = [ { 'ipAddress' : ip_address, 'comment' : 'pyatlas generated whitelist entry' } ]
pprint.pprint(data)
target = f'{ApiVersion.A1.value}/groups/{self.project_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_project_apikey_whitelist(self
,apikey_id
,ip_address
,project_id):
""" Add ip address to whitelist for a given
            programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/groups/{project_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def new_org_apikey(self,ip_address):
apikey = self.create_org_apikey()
whitelist = self.add_org_apikey_whitelist(apikey.id
,ip_address)
return { 'apikey' : apikey, 'whitelist' : whitelist }
def create_org_apikey(self
,description='pyatlas generated org apikey'
,roles='ORG_GROUP_CREATOR'):
""" Create a new programatic apikey in <project_name>
with the given or default (GROUP_OWNER)
permissions.
"""
print('pyatlas - create_apikey')
roles = roles.split(',')
pprint.pprint(roles)
data = { 'desc' : description, 'roles' : roles }
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def add_org_apikey_whitelist(self
,apikey_id
,ip_address):
""" Add ip address to whitelist for a given
            programmatic apikey in <project_name>.
"""
print('pyatlas - add_whitelist')
data = [ { "ipAddress" : f"\"{ip_address}\"" } ]
pprint.pprint(data)
target = f'{ApiVersion.CM1.value}/orgs/{self.org_id}/apiKeys/{apikey_id}/whitelist'
print( f'target={target}' )
print( f'data={data}' )
response = self.post(target, body=data)
return response
def create_database_user(self,db_user={},project_id=''):
""" Create a new db user
"""
        project_id = project_id if project_id != '' else self.project_id
logger.info(f'create_database_user {db_user} {project_id}')
        res = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/databaseUsers',body=db_user)
        return res
def bind(self,cluster_name,ip_address='',bind_details={},project_id=''):
""" Returns a MongoDB connection string along with
a programat
x1. Need programatic api key to add ip to whitelist
2. Add ip to whitelist
3. Generate DB user x with prog api key
4. get cluster info
5. assemble connection string and return
"""
        project_id = project_id if project_id != '' else self.project_id
if ip_address == '':
headers = { 'User-Agent': 'curl/7.61.0'} # spoof for simple response
ip = requests.get('http://ifconfig.co', headers)
ip_address = ip.text.rstrip()
logger.info(f'bind: looked up ip address: {ip_address}')
#key = self.create_programatic_apikey(description=description,project_id=project_id)
db_user = { 'username' : 'foo'
,'password' : 'changeme'
,'databaseName' : 'admin'
,'roles' : [ {'databaseName' : 'admin', 'roleName' : 'dbAdminAnyDatabase'} ]
}
user = self.create_database_user(db_user,project_id=project_id)
cluster = self.get_cluster(cluster_name)
        cs = cluster['mongoURIWithOptions'].split('//',1)
        # Assemble the connection string from the db user credentials created above.
        conn_str = f"{cs[0]}//{db_user['username']}:{db_user['password']}@{cs[1]}"
        return conn_str
## Cluster APIs ##
def clusters(self,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return list of clusters for this organization.
"""
        project_id = project_id if project_id != '' else self.project_id
return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))
def get_cluster(self,cluster_name,project_id=''):
""" Return cluster by name for this organization.
"""
print( f'>>>>>>{self.project_id}')
if project_id == '':
project_id = self.project_id
return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))
def cluster_ready(self,cluster_name,project_id=os.environ.get("ATLAS_PROJECT")):
""" Return True if and only if cluster stateName is `IDLE`.
"""
        cluster = self.get_cluster(cluster_name, project_id)
pprint.pprint(cluster)
return cluster['stateName'] == 'IDLE'
def create_cluster(self, provision_details, project_id=""):
""" Create a cluster.
            The provision_details should be an instance of the
AtlasCluster CRD
"""
response = self.post(f'{ApiVersion.A1.value}/groups/{project_id}/clusters'
,body=provision_details)
return response
def delete_cluster(self, cluster_name, project_id ="" ):
""" Delete the cluster.
"""
response = self.delete(f'{ApiVersion.A1.value}/groups/{project_id}/clusters/{cluster_name}')
return response
    ## Fiduciary APIs ##
def invoices(self,org_id=None,invoice_id=''):
""" Return all invoices or a particular invoice.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))
def pending_invoice(self,org_id=None):
""" Return the pending invoice for this organization id.
"""
if org_id is None:
org_id = self.org_id
return self.get('{}/orgs/{}/invoices/pending'.format(ApiVersion.A1.value,org_id))
def invoice_items(self,org_id=None,query={}):
""" Return the line items posted for the
given _date from the appropriate invoice.
"""
if org_id is None:
org_id = self.org_id
query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')
# Given a 'query_end_date' to find the invoice containing the
# line items for that date we need to find the invoice which
# has 'endDate' equal to the end of the month of the `query_end_date`
query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))
target_invoices = []
invoices = self.invoices(org_id)
if self.verbose:
print('Searching invoices org_id={}'.format(org_id))
print('query={} query_end_date={}'.format(query,query_end_date))
print('Result keys: {}'.format( invoices['content'].keys() ))
print('Total result count: {}'.format( invoices['content']['totalCount'] ))
for invoice in invoices['content']['results']:
#pprint.pprint(invoice)
end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if self.verbose:
print('invoice({})[\'endDate\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))
if end_date == query_first_next_month:
target_invoices.append(invoice)
if self.verbose:
print('Target invoices: {}'.format(target_invoices))
target_line_items = []
for invoice in target_invoices:
invoice_details = self.invoices(org_id,invoice['id'])
print('invoice_details: {}'.format(invoice_details))
for item in invoice_details['content']['lineItems']:
end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')
if end_date == query_end_date:
target_line_items.append(item)
if self.verbose:
print('target_line_items: {}'.format(target_line_items))
return target_line_items
    @staticmethod
    def summarize_invoice(line_items):
""" Return the sum total for a given list of invoice items.
"""
sku_summary = AtlasClient.summarize_invoice_items_by_sku(line_items)
total = 0
for item in sku_summary:
total += sku_summary[item]['totalPriceCents']
return total
    @staticmethod
    def summarize_invoice_items_by_sku(line_items):
""" Return a dict summary of line items by SKU.
"""
sku_summary = {}
for item in line_items:
if item['sku'] not in sku_summary:
|
c = sku_summary[ item['sku'] ]['totalPriceCents'] + item['totalPriceCents']
si = { 'totalPriceCents' : c,
'sku' : item['sku'],
'endDate' : item['endDate'] }
sku_summary[ item['sku'] ] = si
return sku_summary
def project_by_name(self,project_name=''):
""" Return MongoDB Atlas Project/Group metadata for given project name.
"""
logger.debug(f'project_by_name project_name={project_name}')
return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))
def envelope(self,parameters={}):
parameters['envelope'] = True
return parameters
###
### raw HTTP methods
###
def get(self, path, parameters={}):
parameters = self.envelope(parameters)
url = '{}{}'.format(self.base_url,path)
logger.info('AtlasClient get:url={}'.format(url))
response= requests.get(url,params=parameters,auth=self.get_digest())
logger.info(f' response.json()={response.json()}')
response.raise_for_status()
return response.json()
def post(self, path, parameters={}, body={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
pprint.pprint(body)
response = requests.post(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,data=json.dumps(body)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
def delete(self, path, parameters={}):
headers = { "Content-Type" : "application/json" }
url = '{}{}'.format(self.base_url,path)
self.pyatlas_config()
print(f'url={url} path={path} base_url={self.base_url}')
response = requests.delete(url
,auth=self.get_digest()
,params=self.envelope(parameters)
,headers=headers)
pprint.pprint(response.json())
response.raise_for_status()
return response.json()
| sku_summary[item['sku']]= { 'totalPriceCents' : 0 } | conditional_block |
planning.rs | use cargo::CargoError;
use cargo::core::Dependency;
use cargo::core::Package as CargoPackage;
use cargo::core::PackageId;
use cargo::core::PackageSet;
use cargo::core::Resolve;
use cargo::core::SourceId;
use cargo::core::Workspace;
use cargo::core::dependency::Kind;
use cargo::ops::Packages;
use cargo::ops;
use cargo::util::CargoResult;
use cargo::util::Cfg;
use cargo::util::Config;
use cargo::util::ToUrl;
use context::BuildDependency;
use context::BuildTarget;
use context::CrateContext;
use context::WorkspaceContext;
use settings::RazeSettings;
use settings::GenMode;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::str;
use util;
pub struct PlannedBuild {
pub workspace_context: WorkspaceContext,
pub crate_contexts: Vec<CrateContext>,
}
pub struct BuildPlanner<'a> {
settings: RazeSettings,
cargo_config: &'a Config,
platform_attrs: Vec<Cfg>,
registry: Option<SourceId>,
}
impl <'a> BuildPlanner<'a> {
pub fn new(settings: RazeSettings,
cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> {
Ok(BuildPlanner {
platform_attrs: try!(util::fetch_attrs(&settings.target)),
cargo_config: cargo_config,
registry: None,
settings: settings,
})
}
pub fn | (&mut self, host: String) -> CargoResult<()> {
match host.to_url().map(|url| SourceId::for_registry(&url)) {
Ok(registry_id) => {
self.registry = Some(registry_id);
Ok(())
},
Err(value) => Err(CargoError::from(value))
}
}
pub fn plan_build(&self) -> CargoResult<PlannedBuild> {
let ResolvedPlan {root_name, packages, resolve} =
try!(ResolvedPlan::resolve_from_files(&self.cargo_config));
let root_package_id = try!(resolve.iter()
.filter(|dep| dep.name() == root_name)
.next()
.ok_or(CargoError::from("root crate should be in cargo resolve")));
let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>();
let mut crate_contexts = Vec::new();
let source_id = match self.registry.clone() {
Some(v) => v,
None => try!(SourceId::crates_io(&self.cargo_config)),
};
for id in try!(find_all_package_ids(source_id, &resolve)) {
let package = packages.get(&id).unwrap().clone();
let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>();
features.sort();
let full_name = format!("{}-{}", id.name(), id.version());
let path = format!("./vendor/{}-{}/", id.name(), id.version());
// Verify that package is really vendored
if self.settings.genmode == GenMode::Vendored {
try!(fs::metadata(&path).map_err(|_| {
CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path))
}));
}
// Identify all possible dependencies
let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } =
PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs);
build_deps.sort();
dev_deps.sort();
normal_deps.sort();
let mut targets = try!(identify_targets(&full_name, &package));
targets.sort();
let possible_crate_settings =
self.settings.crates
.get(id.name())
.and_then(|c| c.get(&id.version().to_string()));
let should_gen_buildrs =
possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false);
let build_script_target = if should_gen_buildrs {
targets.iter().find(|t| t.kind.deref() == "custom-build").cloned()
} else {
None
};
let targets_sans_build_script =
targets.into_iter().filter(|t| t.kind.deref() != "custom-build").collect::<Vec<_>>();
let additional_deps =
possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new());
let additional_flags =
possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new());
let extra_aliased_targets =
possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new());
// Skip generated dependencies explicitly designated to be skipped (potentially due to
// being replaced or customized as part of additional_deps)
let non_skipped_normal_deps = if let Some(s) = possible_crate_settings {
normal_deps.into_iter()
.filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version)))
.collect::<Vec<_>>()
} else {
normal_deps
};
crate_contexts.push(CrateContext {
pkg_name: id.name().to_owned(),
pkg_version: id.version().to_string(),
features: features,
is_root_dependency: root_direct_deps.contains(&id),
metadeps: Vec::new() /* TODO(acmcarther) */,
dependencies: non_skipped_normal_deps,
build_dependencies: build_deps,
dev_dependencies: dev_deps,
path: path,
build_script_target: build_script_target,
targets: targets_sans_build_script,
platform_triple: self.settings.target.to_owned(),
additional_deps: additional_deps,
additional_flags: additional_flags,
extra_aliased_targets: extra_aliased_targets,
})
}
let workspace_context = WorkspaceContext {
workspace_path: self.settings.workspace_path.clone(),
platform_triple: self.settings.target.clone(),
gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(),
};
crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version));
Ok(PlannedBuild{
workspace_context: workspace_context,
crate_contexts: crate_contexts
})
}
}
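// A minimal usage sketch (not part of the original source); it assumes a `RazeSettings`
// value and a `cargo::util::Config` have already been constructed elsewhere:
//
//   let planner = BuildPlanner::new(settings, &config)?;
//   let planned_build = planner.plan_build()?;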
/** The set of all included dependencies for Cargo's dependency categories. */
pub struct PlannedDeps {
pub build_deps: Vec<BuildDependency>,
pub dev_deps: Vec<BuildDependency>,
pub normal_deps: Vec<BuildDependency>,
}
impl PlannedDeps {
/**
* Identifies the full set of cargo dependencies for the provided package id using cargo's
* resolution details.
*/
pub fn find_all_deps(id: &PackageId,
package: &CargoPackage,
resolve: &Resolve,
platform_triple: &str,
platform_attrs: &Vec<Cfg>) -> PlannedDeps {
let platform_deps = package
.dependencies()
.iter()
.filter(|dep| {
dep.platform()
.map(|p| p.matches(&platform_triple, Some(&platform_attrs)))
.unwrap_or(true)
})
.cloned()
.collect::<Vec<Dependency>>();
let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build);
let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development);
let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal);
let resolved_deps = resolve.deps(&id).into_iter()
.map(|dep| BuildDependency {
name: dep.name().to_owned(),
version: dep.version().to_string(),
})
.collect::<Vec<BuildDependency>>();
PlannedDeps {
normal_deps:
resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(),
build_deps:
resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(),
dev_deps:
resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(),
}
}
}
/** A synthesized Cargo dependency resolution. */
pub struct ResolvedPlan<'a> {
pub root_name: String,
pub packages: PackageSet<'a>,
pub resolve: Resolve,
}
impl<'a> ResolvedPlan<'a> {
/**
* Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and
* the resolution graph.
*/
pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> {
let lockfile = Path::new("Cargo.lock");
let manifest_path = lockfile.parent().unwrap().join("Cargo.toml");
let manifest = env::current_dir().unwrap().join(&manifest_path);
let ws = try!(Workspace::new(&manifest, cargo_config));
let specs = Packages::All.into_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name().to_owned();
let (packages, resolve) = ops::resolve_ws_precisely(
&ws,
None,
&[],
false,
false,
&specs)?;
Ok(ResolvedPlan {
root_name: root_name,
packages: packages,
resolve: resolve,
})
}
}
/** Enumerates the set of all possibly relevant packages for the Cargo dependencies */
fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> {
try!(fs::metadata("Cargo.lock").map_err(|_| {
CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.")
}));
let mut package_ids = resolve.iter()
.filter(|id| *id.source_id() == registry_id)
.cloned()
.collect::<Vec<_>>();
package_ids.sort_by_key(|id| id.name().to_owned());
Ok(package_ids)
}
/** Derives target objects from Cargo's target information. */
fn identify_targets(full_name: &str, package: &CargoPackage) -> CargoResult<Vec<BuildTarget>> {
let partial_path = format!("{}/", full_name);
let partial_path_byte_length = partial_path.as_bytes().len();
let mut targets = Vec::new();
for target in package.targets().iter() {
let target_path_str = try!(target.src_path().to_str()
.ok_or(CargoError::from(format!("path for {}'s target {} wasn't unicode", &full_name, target.name()))))
.to_owned();
let crate_name_str_idx = try!(target_path_str.find(&partial_path)
.ok_or(CargoError::from(format!("path for {}'s target {} should have been in vendor directory", &full_name, target.name()))));
let local_path_bytes = target_path_str.bytes()
.skip(crate_name_str_idx + partial_path_byte_length)
.collect::<Vec<_>>();
let local_path_str = String::from_utf8(local_path_bytes).unwrap();
for kind in util::kind_to_kinds(target.kind()) {
targets.push(BuildTarget {
name: target.name().to_owned(),
path: local_path_str.clone(),
kind: kind,
});
}
}
Ok(targets)
}
| set_registry_from_url | identifier_name |
planning.rs | use cargo::CargoError;
use cargo::core::Dependency;
use cargo::core::Package as CargoPackage;
use cargo::core::PackageId;
use cargo::core::PackageSet;
use cargo::core::Resolve;
use cargo::core::SourceId;
use cargo::core::Workspace;
use cargo::core::dependency::Kind;
use cargo::ops::Packages;
use cargo::ops;
use cargo::util::CargoResult;
use cargo::util::Cfg;
use cargo::util::Config;
use cargo::util::ToUrl;
use context::BuildDependency;
use context::BuildTarget;
use context::CrateContext;
use context::WorkspaceContext;
use settings::RazeSettings;
use settings::GenMode;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::str;
use util;
pub struct PlannedBuild {
pub workspace_context: WorkspaceContext,
pub crate_contexts: Vec<CrateContext>,
}
pub struct BuildPlanner<'a> {
settings: RazeSettings,
cargo_config: &'a Config,
platform_attrs: Vec<Cfg>,
registry: Option<SourceId>,
}
impl <'a> BuildPlanner<'a> {
pub fn new(settings: RazeSettings,
cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> {
Ok(BuildPlanner {
platform_attrs: try!(util::fetch_attrs(&settings.target)),
cargo_config: cargo_config,
registry: None,
settings: settings,
})
}
pub fn set_registry_from_url(&mut self, host: String) -> CargoResult<()> {
match host.to_url().map(|url| SourceId::for_registry(&url)) {
Ok(registry_id) => {
self.registry = Some(registry_id);
Ok(())
},
Err(value) => Err(CargoError::from(value))
}
}
pub fn plan_build(&self) -> CargoResult<PlannedBuild> {
let ResolvedPlan {root_name, packages, resolve} =
try!(ResolvedPlan::resolve_from_files(&self.cargo_config));
let root_package_id = try!(resolve.iter()
.filter(|dep| dep.name() == root_name)
.next()
.ok_or(CargoError::from("root crate should be in cargo resolve")));
let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>();
let mut crate_contexts = Vec::new();
let source_id = match self.registry.clone() {
Some(v) => v,
None => try!(SourceId::crates_io(&self.cargo_config)),
};
for id in try!(find_all_package_ids(source_id, &resolve)) {
let package = packages.get(&id).unwrap().clone();
let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>();
features.sort();
let full_name = format!("{}-{}", id.name(), id.version());
let path = format!("./vendor/{}-{}/", id.name(), id.version());
// Verify that package is really vendored
if self.settings.genmode == GenMode::Vendored {
try!(fs::metadata(&path).map_err(|_| {
CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path))
}));
}
// Identify all possible dependencies
let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } =
PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs);
build_deps.sort();
dev_deps.sort();
normal_deps.sort();
let mut targets = try!(identify_targets(&full_name, &package));
targets.sort();
let possible_crate_settings =
self.settings.crates
.get(id.name())
.and_then(|c| c.get(&id.version().to_string()));
let should_gen_buildrs =
possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false);
let build_script_target = if should_gen_buildrs {
targets.iter().find(|t| t.kind.deref() == "custom-build").cloned()
} else {
None
};
let targets_sans_build_script =
targets.into_iter().filter(|t| t.kind.deref() != "custom-build").collect::<Vec<_>>();
let additional_deps =
possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new());
let additional_flags =
possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new());
let extra_aliased_targets =
possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new());
// Skip generated dependencies explicitly designated to be skipped (potentially due to
// being replaced or customized as part of additional_deps)
let non_skipped_normal_deps = if let Some(s) = possible_crate_settings | else {
normal_deps
};
crate_contexts.push(CrateContext {
pkg_name: id.name().to_owned(),
pkg_version: id.version().to_string(),
features: features,
is_root_dependency: root_direct_deps.contains(&id),
metadeps: Vec::new() /* TODO(acmcarther) */,
dependencies: non_skipped_normal_deps,
build_dependencies: build_deps,
dev_dependencies: dev_deps,
path: path,
build_script_target: build_script_target,
targets: targets_sans_build_script,
platform_triple: self.settings.target.to_owned(),
additional_deps: additional_deps,
additional_flags: additional_flags,
extra_aliased_targets: extra_aliased_targets,
})
}
let workspace_context = WorkspaceContext {
workspace_path: self.settings.workspace_path.clone(),
platform_triple: self.settings.target.clone(),
gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(),
};
crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version));
Ok(PlannedBuild{
workspace_context: workspace_context,
crate_contexts: crate_contexts
})
}
}
/** The set of all included dependencies for Cargo's dependency categories. */
pub struct PlannedDeps {
pub build_deps: Vec<BuildDependency>,
pub dev_deps: Vec<BuildDependency>,
pub normal_deps: Vec<BuildDependency>,
}
impl PlannedDeps {
/**
* Identifies the full set of cargo dependencies for the provided package id using cargo's
* resolution details.
*/
pub fn find_all_deps(id: &PackageId,
package: &CargoPackage,
resolve: &Resolve,
platform_triple: &str,
platform_attrs: &Vec<Cfg>) -> PlannedDeps {
let platform_deps = package
.dependencies()
.iter()
.filter(|dep| {
dep.platform()
.map(|p| p.matches(&platform_triple, Some(&platform_attrs)))
.unwrap_or(true)
})
.cloned()
.collect::<Vec<Dependency>>();
let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build);
let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development);
let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal);
let resolved_deps = resolve.deps(&id).into_iter()
.map(|dep| BuildDependency {
name: dep.name().to_owned(),
version: dep.version().to_string(),
})
.collect::<Vec<BuildDependency>>();
PlannedDeps {
normal_deps:
resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(),
build_deps:
resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(),
dev_deps:
resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(),
}
}
}
/** A synthesized Cargo dependency resolution. */
pub struct ResolvedPlan<'a> {
pub root_name: String,
pub packages: PackageSet<'a>,
pub resolve: Resolve,
}
impl<'a> ResolvedPlan<'a> {
/**
* Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and
* the resolution graph.
*/
pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> {
let lockfile = Path::new("Cargo.lock");
let manifest_path = lockfile.parent().unwrap().join("Cargo.toml");
let manifest = env::current_dir().unwrap().join(&manifest_path);
let ws = try!(Workspace::new(&manifest, cargo_config));
let specs = Packages::All.into_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name().to_owned();
let (packages, resolve) = ops::resolve_ws_precisely(
&ws,
None,
&[],
false,
false,
&specs)?;
Ok(ResolvedPlan {
root_name: root_name,
packages: packages,
resolve: resolve,
})
}
}
/** Enumerates the set of all possibly relevant packages for the Cargo dependencies */
fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> {
try!(fs::metadata("Cargo.lock").map_err(|_| {
CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.")
}));
let mut package_ids = resolve.iter()
.filter(|id| *id.source_id() == registry_id)
.cloned()
.collect::<Vec<_>>();
package_ids.sort_by_key(|id| id.name().to_owned());
Ok(package_ids)
}
/** Derives target objects from Cargo's target information. */
fn identify_targets(full_name: &str, package: &CargoPackage) -> CargoResult<Vec<BuildTarget>> {
let partial_path = format!("{}/", full_name);
let partial_path_byte_length = partial_path.as_bytes().len();
let mut targets = Vec::new();
for target in package.targets().iter() {
let target_path_str = try!(target.src_path().to_str()
.ok_or(CargoError::from(format!("path for {}'s target {} wasn't unicode", &full_name, target.name()))))
.to_owned();
let crate_name_str_idx = try!(target_path_str.find(&partial_path)
.ok_or(CargoError::from(format!("path for {}'s target {} should have been in vendor directory", &full_name, target.name()))));
let local_path_bytes = target_path_str.bytes()
.skip(crate_name_str_idx + partial_path_byte_length)
.collect::<Vec<_>>();
let local_path_str = String::from_utf8(local_path_bytes).unwrap();
for kind in util::kind_to_kinds(target.kind()) {
targets.push(BuildTarget {
name: target.name().to_owned(),
path: local_path_str.clone(),
kind: kind,
});
}
}
Ok(targets)
}
| {
normal_deps.into_iter()
.filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version)))
.collect::<Vec<_>>()
} | conditional_block |
planning.rs | use cargo::CargoError;
use cargo::core::Dependency;
use cargo::core::Package as CargoPackage;
use cargo::core::PackageId;
use cargo::core::PackageSet; | use cargo::ops::Packages;
use cargo::ops;
use cargo::util::CargoResult;
use cargo::util::Cfg;
use cargo::util::Config;
use cargo::util::ToUrl;
use context::BuildDependency;
use context::BuildTarget;
use context::CrateContext;
use context::WorkspaceContext;
use settings::RazeSettings;
use settings::GenMode;
use std::collections::HashSet;
use std::env;
use std::fs;
use std::ops::Deref;
use std::path::Path;
use std::str;
use util;
pub struct PlannedBuild {
pub workspace_context: WorkspaceContext,
pub crate_contexts: Vec<CrateContext>,
}
pub struct BuildPlanner<'a> {
settings: RazeSettings,
cargo_config: &'a Config,
platform_attrs: Vec<Cfg>,
registry: Option<SourceId>,
}
impl <'a> BuildPlanner<'a> {
pub fn new(settings: RazeSettings,
cargo_config: &'a Config) -> CargoResult<BuildPlanner<'a>> {
Ok(BuildPlanner {
platform_attrs: try!(util::fetch_attrs(&settings.target)),
cargo_config: cargo_config,
registry: None,
settings: settings,
})
}
pub fn set_registry_from_url(&mut self, host: String) -> CargoResult<()> {
match host.to_url().map(|url| SourceId::for_registry(&url)) {
Ok(registry_id) => {
self.registry = Some(registry_id);
Ok(())
},
Err(value) => Err(CargoError::from(value))
}
}
pub fn plan_build(&self) -> CargoResult<PlannedBuild> {
let ResolvedPlan {root_name, packages, resolve} =
try!(ResolvedPlan::resolve_from_files(&self.cargo_config));
let root_package_id = try!(resolve.iter()
.filter(|dep| dep.name() == root_name)
.next()
.ok_or(CargoError::from("root crate should be in cargo resolve")));
let root_direct_deps = resolve.deps(&root_package_id).cloned().collect::<HashSet<_>>();
let mut crate_contexts = Vec::new();
let source_id = match self.registry.clone() {
Some(v) => v,
None => try!(SourceId::crates_io(&self.cargo_config)),
};
for id in try!(find_all_package_ids(source_id, &resolve)) {
let package = packages.get(&id).unwrap().clone();
let mut features = resolve.features(&id).clone().into_iter().collect::<Vec<_>>();
features.sort();
let full_name = format!("{}-{}", id.name(), id.version());
let path = format!("./vendor/{}-{}/", id.name(), id.version());
// Verify that package is really vendored
if self.settings.genmode == GenMode::Vendored {
try!(fs::metadata(&path).map_err(|_| {
CargoError::from(format!("failed to find {}. Either switch to \"Remote\" genmode, or run `cargo vendor -x` first.", &path))
}));
}
// Identify all possible dependencies
let PlannedDeps { mut build_deps, mut dev_deps, mut normal_deps } =
PlannedDeps::find_all_deps(&id, &package, &resolve, &self.settings.target, &self.platform_attrs);
build_deps.sort();
dev_deps.sort();
normal_deps.sort();
let mut targets = try!(identify_targets(&full_name, &package));
targets.sort();
let possible_crate_settings =
self.settings.crates
.get(id.name())
.and_then(|c| c.get(&id.version().to_string()));
let should_gen_buildrs =
possible_crate_settings.map(|s| s.gen_buildrs.clone()).unwrap_or(false);
let build_script_target = if should_gen_buildrs {
targets.iter().find(|t| t.kind.deref() == "custom-build").cloned()
} else {
None
};
let targets_sans_build_script =
targets.into_iter().filter(|t| t.kind.deref() != "custom-build").collect::<Vec<_>>();
let additional_deps =
possible_crate_settings.map(|s| s.additional_deps.clone()).unwrap_or(Vec::new());
let additional_flags =
possible_crate_settings.map(|s| s.additional_flags.clone()).unwrap_or(Vec::new());
let extra_aliased_targets =
possible_crate_settings.map(|s| s.extra_aliased_targets.clone()).unwrap_or(Vec::new());
// Skip generated dependencies explicitly designated to be skipped (potentially due to
// being replaced or customized as part of additional_deps)
let non_skipped_normal_deps = if let Some(s) = possible_crate_settings {
normal_deps.into_iter()
.filter(|d| !s.skipped_deps.contains(&format!("{}-{}", d.name, d.version)))
.collect::<Vec<_>>()
} else {
normal_deps
};
crate_contexts.push(CrateContext {
pkg_name: id.name().to_owned(),
pkg_version: id.version().to_string(),
features: features,
is_root_dependency: root_direct_deps.contains(&id),
metadeps: Vec::new() /* TODO(acmcarther) */,
dependencies: non_skipped_normal_deps,
build_dependencies: build_deps,
dev_dependencies: dev_deps,
path: path,
build_script_target: build_script_target,
targets: targets_sans_build_script,
platform_triple: self.settings.target.to_owned(),
additional_deps: additional_deps,
additional_flags: additional_flags,
extra_aliased_targets: extra_aliased_targets,
})
}
let workspace_context = WorkspaceContext {
workspace_path: self.settings.workspace_path.clone(),
platform_triple: self.settings.target.clone(),
gen_workspace_prefix: self.settings.gen_workspace_prefix.clone(),
};
crate_contexts.sort_by_key(|context| format!("{}-{}", context.pkg_name, context.pkg_version));
Ok(PlannedBuild{
workspace_context: workspace_context,
crate_contexts: crate_contexts
})
}
}
/** The set of all included dependencies for Cargo's dependency categories. */
pub struct PlannedDeps {
pub build_deps: Vec<BuildDependency>,
pub dev_deps: Vec<BuildDependency>,
pub normal_deps: Vec<BuildDependency>,
}
impl PlannedDeps {
/**
* Identifies the full set of cargo dependencies for the provided package id using cargo's
* resolution details.
*/
pub fn find_all_deps(id: &PackageId,
package: &CargoPackage,
resolve: &Resolve,
platform_triple: &str,
platform_attrs: &Vec<Cfg>) -> PlannedDeps {
let platform_deps = package
.dependencies()
.iter()
.filter(|dep| {
dep.platform()
.map(|p| p.matches(&platform_triple, Some(&platform_attrs)))
.unwrap_or(true)
})
.cloned()
.collect::<Vec<Dependency>>();
let build_deps = util::take_kinded_dep_names(&platform_deps, Kind::Build);
let dev_deps = util::take_kinded_dep_names(&platform_deps, Kind::Development);
let normal_deps = util::take_kinded_dep_names(&platform_deps, Kind::Normal);
let resolved_deps = resolve.deps(&id).into_iter()
.map(|dep| BuildDependency {
name: dep.name().to_owned(),
version: dep.version().to_string(),
})
.collect::<Vec<BuildDependency>>();
PlannedDeps {
normal_deps:
resolved_deps.iter().filter(|d| normal_deps.contains(&d.name)).cloned().collect(),
build_deps:
resolved_deps.iter().filter(|d| build_deps.contains(&d.name)).cloned().collect(),
dev_deps:
resolved_deps.into_iter().filter(|d| dev_deps.contains(&d.name)).collect(),
}
}
}
/** A synthesized Cargo dependency resolution. */
pub struct ResolvedPlan<'a> {
pub root_name: String,
pub packages: PackageSet<'a>,
pub resolve: Resolve,
}
impl<'a> ResolvedPlan<'a> {
/**
* Performs Cargo's own build plan resolution, yielding the root crate, the set of packages, and
* the resolution graph.
*/
pub fn resolve_from_files(cargo_config: &Config) -> CargoResult<ResolvedPlan> {
let lockfile = Path::new("Cargo.lock");
let manifest_path = lockfile.parent().unwrap().join("Cargo.toml");
let manifest = env::current_dir().unwrap().join(&manifest_path);
let ws = try!(Workspace::new(&manifest, cargo_config));
let specs = Packages::All.into_package_id_specs(&ws)?;
let root_name = specs.iter().next().unwrap().name().to_owned();
let (packages, resolve) = ops::resolve_ws_precisely(
&ws,
None,
&[],
false,
false,
&specs)?;
Ok(ResolvedPlan {
root_name: root_name,
packages: packages,
resolve: resolve,
})
}
}
/** Enumerates the set of all possibly relevant packages for the Cargo dependencies */
fn find_all_package_ids(registry_id: SourceId, resolve: &Resolve) -> CargoResult<Vec<PackageId>> {
try!(fs::metadata("Cargo.lock").map_err(|_| {
CargoError::from("failed to find Cargo.lock. Please run `cargo generate-lockfile` first.")
}));
let mut package_ids = resolve.iter()
.filter(|id| *id.source_id() == registry_id)
.cloned()
.collect::<Vec<_>>();
package_ids.sort_by_key(|id| id.name().to_owned());
Ok(package_ids)
}
/** Derives target objects from Cargo's target information. */
fn identify_targets(full_name: &str, package: &CargoPackage) -> CargoResult<Vec<BuildTarget>> {
let partial_path = format!("{}/", full_name);
let partial_path_byte_length = partial_path.as_bytes().len();
let mut targets = Vec::new();
for target in package.targets().iter() {
let target_path_str = try!(target.src_path().to_str()
.ok_or(CargoError::from(format!("path for {}'s target {} wasn't unicode", &full_name, target.name()))))
.to_owned();
let crate_name_str_idx = try!(target_path_str.find(&partial_path)
.ok_or(CargoError::from(format!("path for {}'s target {} should have been in vendor directory", &full_name, target.name()))));
let local_path_bytes = target_path_str.bytes()
.skip(crate_name_str_idx + partial_path_byte_length)
.collect::<Vec<_>>();
let local_path_str = String::from_utf8(local_path_bytes).unwrap();
for kind in util::kind_to_kinds(target.kind()) {
targets.push(BuildTarget {
name: target.name().to_owned(),
path: local_path_str.clone(),
kind: kind,
});
}
}
Ok(targets)
} | use cargo::core::Resolve;
use cargo::core::SourceId;
use cargo::core::Workspace;
use cargo::core::dependency::Kind; | random_line_split |
main.rs | fn main() {
// defining a variable
println!("-------defining a variable");
println!("Hello, Hooman!");
let mut x = 45; // variables are immutable by default unless declared with mut
println!("The value of x is {}", x);
x = 10;
println!("The value of x is {}", x);
let y: i64;
y = 734;
println!("{}", y);
// if statement
println!("-------if statement");
if y < 10 {
println!("The {} is less!", y);
} else {
println!("The {} is big!", y);
}
// loop
println!("-------loop");
let mut n = 0;
loop {
n += 7;
if n % 5 == 0 || n % 2 == 0 {
continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // this form takes ownership, so family_name cannot be used afterwards
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) {
println!("{} is even", i);
} else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // the mutable reference is scoped to this block, so the borrow ends before x is used again
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn | (c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // can be called multiple times because the value is passed by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either way is valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 {
return (self.width + self.height) * 2;
}
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // iterate over the whitespace-separated tokens
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator");
let number = 11;
match number {
1 => println!("It is one!"), // case 1
2 => println!("it is two!"), // case 2
3 | 4 => println!("it is three or four!"), // case 3 | 4
5..=10 => println!("it is between 5 to 10"), // case 5 to 10
_ => println!("it is out of the range!"), // default
}
// read input from console
println!("-------read input from console");
use std::io;
let mut input = String::new();
println!("Hey mate! Say something:");
match io::stdin().read_line(&mut input) {
Ok(_) => {
println!("Success! You said: {}", input.to_ascii_uppercase());
},
Err(e) => println!("Oops! SOmething went wrong: {}", e)
}
// Hashmap
println!("-------Hashmap");
use std::collections::HashMap;
// define HashMap
let mut marks = HashMap::new();
// add values
marks.insert("Rust Programming", 96);
marks.insert("Lua Programming", 100);
marks.insert("C++ Programming", 90);
marks.insert("Java Programming", 94);
// prompt length of the HashMap
println!("How many subjects are collected there? {}", marks.len());
// find a subject
match marks.get("Rust Programming") {
Some(mark) => println!("You have got {} for that.", mark),
None => println!("You did not study this subject!"),
}
// remove an item
marks.remove("Java Programming");
// loop through HashMap
for (subject, mark) in &marks {
println!("For {} you have got {}.", subject, mark);
}
// check for value
println!("Did you study C#? {} ", marks.contains_key("C# Programming"));
}
| print_color2 | identifier_name |
main.rs | fn main() {
// defining a variable
println!("-------defining a variable");
println!("Hello, Hooman!");
let mut x = 45; // variables are immutable by default unless declared with mut
println!("The value of x is {}", x);
x = 10;
println!("The value of x is {}", x);
let y: i64;
y = 734;
println!("{}", y);
// if statement
println!("-------if statement");
if y < 10 {
println!("The {} is less!", y);
} else {
println!("The {} is big!", y);
}
// loop
println!("-------loop");
let mut n = 0;
loop {
n += 7;
if n % 5 == 0 || n % 2 == 0 {
continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // this form takes ownership, so family_name cannot be used afterwards
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) | else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // the mutable reference is scoped to this block, so the borrow ends before x is used again
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn print_color2(c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // can be called multiple times because the value is passed by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either way is valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 {
return (self.width + self.height) * 2;
}
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // iterate over the whitespace-separated tokens
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator");
let number = 11;
match number {
1 => println!("It is one!"), // case 1
2 => println!("it is two!"), // case 2
3 | 4 => println!("it is three or four!"), // case 3 | 4
5..=10 => println!("it is between 5 to 10"), // case 5 to 10
_ => println!("it is out of the range!"), // default
}
// read input from console
println!("-------read input from console");
use std::io;
let mut input = String::new();
println!("Hey mate! Say something:");
match io::stdin().read_line(&mut input) {
Ok(_) => {
println!("Success! You said: {}", input.to_ascii_uppercase());
},
Err(e) => println!("Oops! SOmething went wrong: {}", e)
}
// Hashmap
println!("-------Hashmap");
use std::collections::HashMap;
// define HashMap
let mut marks = HashMap::new();
// add values
marks.insert("Rust Programming", 96);
marks.insert("Lua Programming", 100);
marks.insert("C++ Programming", 90);
marks.insert("Java Programming", 94);
// prompt length of the HashMap
println!("How many subjects are collected there? {}", marks.len());
// find a subject
match marks.get("Rust Programming") {
Some(mark) => println!("You have got {} for that.", mark),
None => println!("You did not study this subject!"),
}
// remove an item
marks.remove("Java Programming");
// loop through HashMap
for (subject, mark) in &marks {
println!("For {} you have got {}.", subject, mark);
}
// check for value
println!("Did you study C#? {} ", marks.contains_key("C# Programming"));
}
| {
println!("{} is even", i);
} | conditional_block |
main.rs | fn main() {
// defining a variable
println!("-------defining a variable");
println!("Hello, Hooman!");
let mut x = 45; // variables are immutable by default unless declared with mut
println!("The value of x is {}", x);
x = 10;
println!("The value of x is {}", x);
let y: i64;
y = 734;
println!("{}", y);
// if statement
println!("-------if statement");
if y < 10 {
println!("The {} is less!", y);
} else {
println!("The {} is big!", y);
}
// loop
println!("-------loop");
let mut n = 0;
loop {
n += 7;
if n % 5 == 0 || n % 2 == 0 {
continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // this form takes ownership, so family_name cannot be used afterwards
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) {
println!("{} is even", i);
} else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // the mutable reference is scoped to this block, so the borrow ends before x is used again
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn print_color2(c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // can be called multiple times because the value is passed by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either way is valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 |
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // iterate over the whitespace-separated tokens
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator");
let number = 11;
match number {
1 => println!("It is one!"), // case 1
2 => println!("it is two!"), // case 2
3 | 4 => println!("it is three or four!"), // case 3 | 4
5..=10 => println!("it is between 5 to 10"), // case 5 to 10
_ => println!("it is out of the range!"), // default
}
// read input from console
println!("-------read input from console");
use std::io;
let mut input = String::new();
println!("Hey mate! Say something:");
match io::stdin().read_line(&mut input) {
Ok(_) => {
println!("Success! You said: {}", input.to_ascii_uppercase());
},
Err(e) => println!("Oops! SOmething went wrong: {}", e)
}
// Hashmap
println!("-------Hashmap");
use std::collections::HashMap;
// define HashMap
let mut marks = HashMap::new();
// add values
marks.insert("Rust Programming", 96);
marks.insert("Lua Programming", 100);
marks.insert("C++ Programming", 90);
marks.insert("Java Programming", 94);
// prompt length of the HashMap
println!("How many subjects are collected there? {}", marks.len());
// find a subject
match marks.get("Rust Programming") {
Some(mark) => println!("You have got {} for that.", mark),
None => println!("You did not study this subject!"),
}
// remove an item
marks.remove("Java Programming");
// loop through HashMap
for (subject, mark) in &marks {
println!("For {} you have got {}.", subject, mark);
}
// check for value
println!("Did you study C#? {} ", marks.contains_key("C# Programming"));
}
| {
return (self.width + self.height) * 2;
} | identifier_body |
main.rs | fn main() {
// defining a variable
println!("-------defining a variable");
println!("Hello, Hooman!");
let mut x = 45; // variables are immutable by default unless declared with mut
println!("The value of x is {}", x);
x = 10;
println!("The value of x is {}", x);
let y: i64;
y = 734;
println!("{}", y);
// if statement
println!("-------if statement");
if y < 10 {
println!("The {} is less!", y);
} else {
println!("The {} is big!", y);
}
// loop
println!("-------loop");
let mut n = 0;
loop {
n += 7;
if n % 5 == 0 || n % 2 == 0 {
continue;
}
println!("The value of n is {}", n);
if n > 100 {
break;
}
}
// for loop
println!("-------for loop");
for i in 1..10 {
println!("The number is {}", i);
}
let range = 10..20;
for i in range {
println!("element in range {}", i);
}
let family_name = vec!["Amir", "Hooman", "Aref", "Shahnaz", "Vihan", "Shima"];
for name in family_name.iter() {
println!("Family person is {}", name);
}
for (index, name) in family_name.iter().enumerate() {
println!("Family people {} is {}", index+1, name);
}
for name in family_name { // this form takes ownership, so family_name cannot be used afterwards
println!("name is {}", name);
}
// enum
println!("-------enum");
enum Direction {
Up,
Down,
Left,
Right
}
let player_direction1:Direction = Direction::Up;
let player_direction2:Direction = Direction::Down;
let player_direction3:Direction = Direction::Left;
let player_direction4:Direction = Direction::Right;
match player_direction1 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction2 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction3 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
match player_direction4 {
Direction::Up => println!("We are heading Up!"),
Direction::Down => println!("We are heading Down!"),
Direction::Left => println!("We are heading Left!"),
Direction::Right => println!("We are heading Right!")
}
// constants
println!("-------constants");
const MAXIMUM_NUMBER: u8 = 7; // must be uppercase
for n in 1..MAXIMUM_NUMBER {
println!("{}", n);
}
// tuples
println!("-------tuples");
let tup1 = ("A", ("Hooman", "Hesamyan"), "C", 734, true);
println!("{}", (tup1.1).1); // referencing a tuple inside the tuple
println!("{}", tup1.0);
println!("{}", tup1.2);
println!("{}", tup1.3);
println!("{}", tup1.4);
let (x, y, z, u, v) = tup1; // destructuring the tuple
println!("{}", x);
println!("{}", y.0);
// function
println!("-------functions");
fn count_to(num: u32) {
for i in 1..num {
if is_even(i) {
println!("{} is even", i);
} else {
println!("{} is odd", i);
}
}
}
count_to(7);
fn is_even(num: u32) -> bool {
return num % 2 == 0;
}
let number = 12;
println!("is {} even? {}", number, is_even(number));
// reference
println!("-------references");
let mut x = 7;
println!("x is {}", x);
{
let x_ref_mut = &mut x; // the mutable reference is scoped to this block, so the borrow ends before x is used again
*x_ref_mut += 7;
println!("x reference is {}", x_ref_mut);
}
let x_ref = &x;
println!("x is {}", x);
println!("x reference is {}", x_ref);
// structs
println!("-------structs");
struct Color {
red: u8, // u8: 0-255
green: u8,
blue: u8
}
let bg = Color {red: 255, green: 70, blue: 15};
println!("{}, {}, {}", bg.red, bg.green, bg.blue);
struct Color2(u8, u8, u8);
let mut bg2 = Color2(30, 70, 255);
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
bg2.2 = 40;
println!("{}, {}, {}", bg2.0, bg2.1, bg2.2);
// pass by reference
println!("-------pass by reference");
fn print_color(c: Color) {
println!("Color - R:{} G:{} B:{}", c.red, c.green, c.blue);
}
fn print_color2(c: &Color2) {
println!("Color - R:{} G:{} B:{}", c.0, c.1, c.2);
}
print_color(bg);
/* print_color(bg); *impossible */
print_color2(&bg2);
print_color2(&bg2);
print_color2(&bg2); // can be called multiple times because the value is passed by reference
// arrays
println!("-------arrays");
let sample_array = [1, 3, 5, 7]; // either way is valid
let sample_array2: [i32; 4] = [6, 8, 15, 20];
println!("{}", sample_array[1]);
for (i, el) in sample_array.iter().enumerate() {
println!("{}-th element is {}", i, el);
}
for i in 0..sample_array2.len() {
println!("{}", sample_array2[i]);
}
let array_of_2 = [2; 10]; // generating an array of 2's with length 10
for el in array_of_2.iter() {
println!("{}", el);
}
// impl
println!("-------impl");
struct Rectangle {
width: u32,
height: u32
}
impl Rectangle {
fn print_description(&self) {
println!("Rectangle: {} x {}", self.width, self.height);
}
fn is_square(&self) -> bool{
return self.width == self.height;
}
fn area(&self) -> u32 {
return self.width * self.height;
}
fn perimeter(&self) -> u32 {
return (self.width + self.height) * 2;
}
}
let rectangle: Rectangle = Rectangle {height: 30, width: 10, };
rectangle.print_description();
println!("The given rectangle is square? {}", rectangle.is_square());
println!("Area is {} and perimeter is {}", rectangle.area(), rectangle.perimeter());
// Strings
println!("-------Strings");
let new_string = "Hello World"; // primitive string
println!("{}", new_string);
let mut my_string = String::from("How is it going today?");
println!("{}", my_string);
println!("{}", my_string.len());
println!("{}", my_string.is_empty());
for token in my_string.split_whitespace() { // iterate over the whitespace-separated tokens
println!("{}-", token)
}
println!("Does contain {} 'today' in it? {}", my_string, my_string.contains("today"));
my_string.push_str(new_string);
println!("{}", my_string);
/* println!("{}", my_string.push_str(new_string)) *impossible */
// Traits (like interface)
println!("-------Traits");
struct Person {
name: String,
age: u32,
}
// impl Person {
// fn to_string(&self) -> String {
// return format!("My name is {} and my age is {}", self.name, self.age);
// }
// }
impl ToString for Person { // trait "ToString" is implemented for "Person"
fn to_string(&self) -> String {
return format!("My name is {} and my age is {}", self.name, self.age);
}
}
let hooman: Person = Person {age: 39, name: String::from("Hesamyan Hooman")};
println!("{}", hooman.to_string());
// Custom Traits (like interface)
println!("-------Custom Traits");
trait HasVoiceBox {
// speak
fn speak(&self);
// check if can speak
fn can_speak(&self) -> bool;
}
impl HasVoiceBox for Person {
fn speak(&self) {
println!("Hello, my name is {} ", self.name);
}
fn can_speak(&self) -> bool {
if self.age > 3 {
return true;
} return false;
}
}
println!("I am {} and I can speak? {}", hooman.name, hooman.can_speak());
hooman.speak();
// Match Operator (like Switch)
println!("-------Match Operator");
let number = 11;
match number {
1 => println!("It is one!"), // case 1
2 => println!("it is two!"), // case 2
3 | 4 => println!("it is three or four!"), // case 3 | 4
5..=10 => println!("it is between 5 to 10"), // case 5 to 10
_ => println!("it is out of the range!"), // default
}
// read input from console
println!("-------read input from console");
use std::io;
let mut input = String::new();
println!("Hey mate! Say something:");
match io::stdin().read_line(&mut input) {
Ok(_) => {
println!("Success! You said: {}", input.to_ascii_uppercase());
},
Err(e) => println!("Oops! SOmething went wrong: {}", e)
}
// Hashmap
println!("-------Hashmap");
use std::collections::HashMap;
// define HashMap
let mut marks = HashMap::new();
// add values
marks.insert("Rust Programming", 96);
marks.insert("Lua Programming", 100);
marks.insert("C++ Programming", 90);
marks.insert("Java Programming", 94);
// prompt length of the HashMap
println!("How many subjects are collected there? {}", marks.len());
// find a subject
match marks.get("Rust Programming") {
Some(mark) => println!("You have got {} for that.", mark),
None => println!("You did not study this subject!"),
}
// remove an item
marks.remove("Java Programming");
// loop through HashMap
for (subject, mark) in &marks {
println!("For {} you have got {}.", subject, mark);
}
// check for value
println!("Did you study C#? {} ", marks.contains_key("C# Programming"));
| } | random_line_split |
|
main.py | # -*- coding: UTF-8 -*-
import urllib
import json
import requests
import re
import time, os, shutil, logging
from .GetConfig import config
from .CrackVerifyCode import crack
from .GetPageDetail import page_detail
# for URL-encoding query values
from urllib.parse import quote
# HTML parsing with BeautifulSoup
from bs4 import BeautifulSoup
import shutil
from selenium import webdriver
from time import sleep
from selenium.webdriver.chrome.options import Options
from requests.cookies import RequestsCookieJar
from urllib.parse import quote_plus, urlencode
from http import cookiejar
HEADER = config.crawl_headers
# page used to obtain the session cookie
BASIC_URL = 'https://kns.cnki.net/kns/brief/result.aspx'
# the query must first be registered once via a POST request
SEARCH_HANDLE_URL = 'https://kns.cnki.net/kns/request/SearchHandler.ashx'
# GET request that returns the document list
GET_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx?pagename='
# base URL for downloads
DOWNLOAD_URL = 'https://kdoc.cnki.net/kdoc/'
# base URL for paging through the result list
CHANGE_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx'
class SearchTools(object):
'''
Search helper class.
Implements the search workflow against CNKI.
'''
def __init__(self,count):
self.session = requests.Session()
self.sheet_name = "CRA" + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
self.index = 0
self.cur_page_num = 1
# 保持会话
self.session.get(BASIC_URL, headers=HEADER)
self.count=count
def get_cookies(self):
# self.webdriver_path = "D:\\workspaces\\pythonworks\\webdriver\\chromedriver_win32\\chromedriver.exe"
self.webdriver_path = "D:\\chromedriver.exe"
# self.webdriver_path = "D:\\安装包\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe"
# options = webdriver.ChromeOptions()
chrome_options = Options()
# options1 = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# options1.add_experimental_option('excludeSwitches', ['enable-logging'])
# driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options, options=options1)
# driver = webdriver.PhantomJS(executable_path=self.webdriver_path)
driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options)
# driver = webdriver.Chrome(self.webdriver_path)
driver.get("https://www.cnki.net/")
driver.find_element_by_id("txt_SearchText").click()
sleep(2)
driver.find_element_by_id("txt_SearchText").send_keys("机器学习")
sleep(1)
element = driver.find_element_by_class_name("search-btn")
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
driver.find_element_by_class_name("search-btn").click()
sleep(1)
coo = driver.get_cookies()
cookies = {}
self.ck = str()
# 获取cookie中的name和value,转化成requests可以使用的形式
for cookie in coo:
cookies[cookie['name']] = cookie['value']
self.ck = self.ck + cookie['name'] + '=' + cookie['value'] + ';'
# print(cookie['name'] + '=' + cookie['value'] + ';')
return self.ck
def search_reference(self, ueser_input,args):
'''
Send a POST request first to register the query with the server,
then send a GET request that no longer carries the query fields;
together the two requests return the literature result list.
'''
if os.path.isdir('data'):
# 递归删除文件
shutil.rmtree('data')
# 创建一个空的
os.mkdir('data')
'''With DbPrefix=CFLS only Chinese records are downloaded; SCDB returns both Chinese and English (English entries have no download link)'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_result_aspx',
'DbPrefix': 'CJFQ',
'DbCatalog': '中国学术期刊网络出版总库',
# 'ConfigFile': 'SCDB.xml',
'ConfigFile': 'CJFQ.xml',
'db_opt': 'CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD', # 搜索类别(CNKI右侧的)
'his': '0',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
# 将固定字段与自定义字段组合
post_data = {**static_post_data, **ueser_input}
try:
self.get_cookies()
except Exception as e:
print(e)
print("cookie获取失败")
# the first request is mandatory, otherwise the server reports that no user session exists
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER)
# get请求中需要传入第一个检索条件的值
key_value = quote(ueser_input.get('txt_1_value1'))
# print("first_post_res:",first_post_res.text)
# print("key_value:",key_value)
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
# 检索结果的第一个页面
second_get_res = self.session.get(self.get_result_url,headers=HEADER)
# cookies = second_get_res.cookies
# cookie = requests.utils.dict_from_cookiejar(cookies)
# print(cookie)
# print(second_get_res.text)
# second_get_res = self.session.get(SEARCH_HANDLE_URL, data=post_data,headers=HEADER)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
print(self.change_page_url)
try:
self.parse_page(
self.pre_parse_page(second_get_res.text), second_get_res.text,args)
except Exception as e:
print(e)
except Exception as e:
print(e)
# pass
# self.parse_page(
# self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
Let the user choose how many result pages to fetch
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
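# the time estimate assumes roughly 5 seconds per document (reference_num_int * 5)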
# is_all_download = input('是否要 | is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
Save the information of the current result page
and parse the download link of every entry on it
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# handle a possible CAPTCHA page
try:
# drop the first tr tag (the table header)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# the text inside one cell is fragmented, so join the pieces together here
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# 将每一篇文献的信息分组
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# 是否开启详情页数据抓取
if config.crawl_isdetail ==1:
time.sleep(config.crawl_stepWaitTime)
if len(self.download_url)>40:
page_detail.get_detail_page(self.session, self.get_result_url,
detail_url, single_refence_list,
self.download_url,self.docid, gettype)
# with open('data/ReferenceList.txt', 'a', encoding='utf-8') as file:
# file.write('\n')
else:
logging.error("无下载链接")
# time.sleep(0.5)
else:
args["CrawProcess"].emit('爬取结束')
print("结束爬取,退出")
break
# exit()
except OSError:
pass
# download_page_left is the number of result pages still to traverse
if download_page_left > 1:
self.cur_page_num += 1
self.get_another_page(download_page_left,args)
def get_another_page(self, download_page_left,args):
'''
Requesting a follow-up page differs from requesting the first page,
so the request URL is rebuilt here
'''
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
get_res = self.session.get(self.get_result_url, headers=HEADER)
download_page_left -= 1
self.parse_page(download_page_left, get_res.text,args)
def download_refence(self,url, single_refence_list,args):
'''
Build the full download URL
and download the document
'''
gettype = "pdf"
# gettype = args['type']
# 拼接下载地址
self.download_url = DOWNLOAD_URL + re.sub(r'../', '', url)
# print("url---------------", self.download_url)
if len(self.download_url) > 40:
args['count']+=1
self.pg="正在下载第%s/%s篇文献"%(args['count'],str(self.select_download_num))
self.info='节点1_正在下载: ' + single_refence_list[1] + '.' + gettype
args["CrawProcess"].emit(str(self.pg+"\n"+self.info))
# print(type(args["CrawProcess"]))
name = single_refence_list[1]
# name = single_refence_list[1] + '_' + single_refence_list[2]
'''Sanitize the file name: titles from the site may contain characters that cannot be saved locally'''
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
# with open('data/Links.txt', 'a', encoding='utf-8') as file:
# file.write(self.download_url + '\n')
# if config.crawl_isdownload ==1:
if not os.path.isdir('data/PDFs'):
os.mkdir(r'data/PDFs')
# filename = self.docid+name+".pdf"
filename = self.docid+name+"." + gettype
try:
if not os.path.isfile(os.path.join("data/PDFs/", filename)):
sess = requests.Session()
HEADER['Referer'] = self.download_url
# HEADER['Cookie'] = 'LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwxSkpTdzNSelZPMGtUTTR3djg1QT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'CurrSortFieldType=desc;CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27);c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 15:04:56;cnkiUserKey=80843df4-4597-8109-17a3-f4f7642134c4;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"fC3r2l"};c_m_expire=2020-09-15 15:04:56;SID_kns8=123112;Ecp_session=1;ASP.NET_SessionId=cdwbc4sppmhjofebxlgpbbp4;SID_kns_new=kns123121;Ecp_ClientId=5200915144402179584;Ecp_notFirstLogin=fC3r2l;LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 16:25:29;cnkiUserKey=700c6580-66f0-d89f-414c-c84f72dc52fa;c_m_expire=2020-09-15 16:25:29;SID_kns8=123106;ASP.NET_SessionId=qag4isl11jbdrt0mjunnyvjr;SID_kns_new=kns123117;Ecp_ClientId=1200915160502413634;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"rptZbY"};Ecp_notFirstLogin=rptZbY;LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;Ecp_session=1;'
HEADER['Cookie'] = self.ck
# HEADER['Cookie'] = 'Ecp_ClientId=1200824163400713266; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; SID=zhuye006; Ecp_session=1; _pk_ref=%5B%22%22%2C%22%22%2C1600247285%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D1QNB3ozqZFmKQrJunLFuJn3iSEv6k-AZeBA3xHZ-8Wa%26wd%3D%26eqid%3Ded55ec7e00044464000000035f61627d%22%5D; _pk_ses=*; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/16/2020 17:27:44; LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; c_m_expire=2020-09-16 17:27:44; Ecp_notFirstLogin=Gr0r31; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"Gr0r31"}'
refence_file = sess.get(self.download_url, headers=HEADER)
with open('data/PDFs/' + filename, 'wb') as file:
file.write(refence_file.content)
# refence_file = requests.get(self.download_url,headers=HEADER)
# with open('data/CAJs/' + filename , 'wb') as file:
# file.write(refence_file.content)
# print(self.download_url)
# refence_file =sess.get(self.download_url,headers=HEADER)
# htmls = refence_file.text
# soup = BeautifulSoup(htmls, 'lxml')
# print(soup.find_all(('img')))
# if len(soup.find_all('img'))>0:
#
# validCodeSubSrc = soup.find_all('img')[0]['src']
#
# code=crack.get_image2(validCodeSubSrc, self.session)
#
# HEADER['Referer'] = self.download_url
#
# payload = "vcode=" + code
# ret = sess.post(self.download_url, data=payload)
# print(ret)
except Exception as e:
logging.error(e)
logging.error('下载出错')
time.sleep(config.crawl_stepWaitTime)
'''Move the downloaded files to the target directory'''
def move_file(self,src_dir, target_dir,args):
args["CrawProcess"].emit("正在移动文件")
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for item in os.listdir(src_dir):
src_name = os.path.join(src_dir, item)
target_name = os.path.join(target_dir, item)
shutil.move(src_name, target_name)
args["CrawProcess"].emit("文件移动完成,爬取完成")
def s2h(self,seconds):
'''
Convert a number of seconds into an hours/minutes/seconds string
'''
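# e.g. s2h(3750) -> '01小时02分钟30秒' (1 hour, 2 minutes, 30 seconds)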
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return ("%02d小时%02d分钟%02d秒" % (h, m, s))
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_notFirstLogin=5BEo2M; Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27); CurrSortFieldType=desc; SID_kcms=124108; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:11:55; c_m_expire=2020-09-14 10:11:55
# https://kns.cnki.net/kcms/download.aspx?filename=w5WUJNFV5pmdrlTbJp3SaNXa09Gbr4GWLZGOLVkcotyYNBDVl9WVyRHTxFnVzRHSuV2LWxkei9mbyhVUwVmdNxUanZ0d1VHZYVUQpJzZYJ1QEdWekx2cwJ3dyFjcxEzQitGWNhnQzoGNptSaj9yaNJ0NDdGMCllU&tablename=CAPJLAST&dflag=cajdown
| 全部下载(y/n)?')
| identifier_name |
main.py | # -*- coding: UTF-8 -*-
import urllib
import json
import requests
import re
import time, os, shutil, logging
from .GetConfig import config
from .CrackVerifyCode import crack
from .GetPageDetail import page_detail
# for URL-encoding query values
from urllib.parse import quote
# HTML parsing with BeautifulSoup
from bs4 import BeautifulSoup
import shutil
from selenium import webdriver
from time import sleep
from selenium.webdriver.chrome.options import Options
from requests.cookies import RequestsCookieJar
from urllib.parse import quote_plus, urlencode
from http import cookiejar
HEADER = config.crawl_headers
# page used to obtain the session cookie
BASIC_URL = 'https://kns.cnki.net/kns/brief/result.aspx'
# the query must first be registered once via a POST request
SEARCH_HANDLE_URL = 'https://kns.cnki.net/kns/request/SearchHandler.ashx'
# GET request that returns the document list
GET_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx?pagename='
# base URL for downloads
DOWNLOAD_URL = 'https://kdoc.cnki.net/kdoc/'
# base URL for paging through the result list
CHANGE_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx'
class SearchTools(object):
'''
Search helper class.
Implements the search workflow against CNKI.
'''
def __init__(self,count):
self.session = requests.Session()
self.sheet_name = "CRA" + time.strftime('%Y%m%d%H%M%S', time.localtime | \\chromedriver.exe"
self.webdriver_path = "D:\\chromedriver.exe"
# self.webdriver_path = "D:\\安装包\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe"
# options = webdriver.ChromeOptions()
chrome_options = Options()
# options1 = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# options1.add_experimental_option('excludeSwitches', ['enable-logging'])
# driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options, options=options1)
# driver = webdriver.PhantomJS(executable_path=self.webdriver_path)
driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options)
# driver = webdriver.Chrome(self.webdriver_path)
driver.get("https://www.cnki.net/")
driver.find_element_by_id("txt_SearchText").click()
sleep(2)
driver.find_element_by_id("txt_SearchText").send_keys("机器学习")
sleep(1)
element = driver.find_element_by_class_name("search-btn")
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
driver.find_element_by_class_name("search-btn").click()
sleep(1)
coo = driver.get_cookies()
cookies = {}
self.ck = str()
# 获取cookie中的name和value,转化成requests可以使用的形式
for cookie in coo:
cookies[cookie['name']] = cookie['value']
self.ck = self.ck + cookie['name'] + '=' + cookie['value'] + ';'
# print(cookie['name'] + '=' + cookie['value'] + ';')
return self.ck
def search_reference(self, ueser_input,args):
'''
第一次发送post请求
再一次发送get请求,这次请求没有写文献等东西
两次请求来获得文献列表
'''
if os.path.isdir('data'):
# 递归删除文件
shutil.rmtree('data')
# 创建一个空的
os.mkdir('data')
'''DbPrefix 为CFLS时 仅下载中文,SCDB 下载中英文(英文无下载链接)'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_result_aspx',
'DbPrefix': 'CJFQ',
'DbCatalog': '中国学术期刊网络出版总库',
# 'ConfigFile': 'SCDB.xml',
'ConfigFile': 'CJFQ.xml',
'db_opt': 'CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD', # 搜索类别(CNKI右侧的)
'his': '0',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
# 将固定字段与自定义字段组合
post_data = {**static_post_data, **ueser_input}
try:
self.get_cookies()
except Exception as e:
print(e)
print("cookie获取失败")
# 必须有第一次请求,否则会提示服务器没有用户
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER)
# get请求中需要传入第一个检索条件的值
key_value = quote(ueser_input.get('txt_1_value1'))
# print("first_post_res:",first_post_res.text)
# print("key_value:",key_value)
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
# 检索结果的第一个页面
second_get_res = self.session.get(self.get_result_url,headers=HEADER)
# cookies = second_get_res.cookies
# cookie = requests.utils.dict_from_cookiejar(cookies)
# print(cookie)
# print(second_get_res.text)
# second_get_res = self.session.get(SEARCH_HANDLE_URL, data=post_data,headers=HEADER)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
print(self.change_page_url)
try:
self.parse_page(
self.pre_parse_page(second_get_res.text), second_get_res.text,args)
except Exception as e:
print(e)
except Exception as e:
print(e)
# pass
# self.parse_page(
# self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
用户选择需要检索的页数
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
# is_all_download = input('是否要全部下载(y/n)?')
is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
保存页面信息
解析每一页的下载地址
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# 处理验证码
try:
# 去除第一个tr标签(表头)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# 因为一列中的信息非常杂乱,此处进行二次拼接
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# split the row text into the fields of a single reference
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# whether detail-page crawling is enabled
if config.crawl_isdetail ==1:
time.sleep(config.crawl_stepWaitTime)
if len(self.download_url)>40:
page_detail.get_detail_page(self.session, self.get_result_url,
detail_url, single_refence_list,
self.download_url,self.docid, gettype)
# with open('data/ReferenceList.txt', 'a', encoding='utf-8') as file:
# file.write('\n')
else:
logging.error("无下载链接")
# time.sleep(0.5)
else:
args["CrawProcess"].emit('爬取结束')
print("结束爬取,退出")
break
# exit()
except OSError:
pass
# download_page_left is the number of pages still waiting to be crawled
if download_page_left > 1:
self.cur_page_num += 1
self.get_another_page(download_page_left,args)
def get_another_page(self, download_page_left,args):
'''
Requests for the remaining pages differ from the first request,
so the request is rebuilt here
'''
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
get_res = self.session.get(self.get_result_url, headers=HEADER)
download_page_left -= 1
self.parse_page(download_page_left, get_res.text,args)
def download_refence(self,url, single_refence_list,args):
'''
Build the download address
and download the reference
'''
gettype = "pdf"
# gettype = args['type']
# build the download address ('\.\./' is escaped so the dots are taken literally)
self.download_url = DOWNLOAD_URL + re.sub(r'\.\./', '', url)
# print("url---------------", self.download_url)
if len(self.download_url) > 40:
args['count']+=1
self.pg="正在下载第%s/%s篇文献"%(args['count'],str(self.select_download_num))
self.info='节点1_正在下载: ' + single_refence_list[1] + '.' + gettype
args["CrawProcess"].emit(str(self.pg+"\n"+self.info))
# print(type(args["CrawProcess"]))
name = single_refence_list[1]
# name = single_refence_list[1] + '_' + single_refence_list[2]
'''Sanitize the file name: the site may use characters that cannot be saved in a local file name'''
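# example: a title like 'HLA-B*57:01/abacavir "review"?' would be saved as 'HLA-B5701abacavir review'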
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
# with open('data/Links.txt', 'a', encoding='utf-8') as file:
# file.write(self.download_url + '\n')
# if config.crawl_isdownload ==1:
if not os.path.isdir('data/PDFs'):
os.mkdir(r'data/PDFs')
# filename = self.docid+name+".pdf"
filename = self.docid+name+"." + gettype
try:
if not os.path.isfile(os.path.join("data/PDFs/", filename)):
sess = requests.Session()
HEADER['Referer'] = self.download_url
# HEADER['Cookie'] = 'LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwxSkpTdzNSelZPMGtUTTR3djg1QT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'CurrSortFieldType=desc;CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27);c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 15:04:56;cnkiUserKey=80843df4-4597-8109-17a3-f4f7642134c4;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"fC3r2l"};c_m_expire=2020-09-15 15:04:56;SID_kns8=123112;Ecp_session=1;ASP.NET_SessionId=cdwbc4sppmhjofebxlgpbbp4;SID_kns_new=kns123121;Ecp_ClientId=5200915144402179584;Ecp_notFirstLogin=fC3r2l;LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 16:25:29;cnkiUserKey=700c6580-66f0-d89f-414c-c84f72dc52fa;c_m_expire=2020-09-15 16:25:29;SID_kns8=123106;ASP.NET_SessionId=qag4isl11jbdrt0mjunnyvjr;SID_kns_new=kns123117;Ecp_ClientId=1200915160502413634;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"rptZbY"};Ecp_notFirstLogin=rptZbY;LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;Ecp_session=1;'
HEADER['Cookie'] = self.ck
# HEADER['Cookie'] = 'Ecp_ClientId=1200824163400713266; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; SID=zhuye006; Ecp_session=1; _pk_ref=%5B%22%22%2C%22%22%2C1600247285%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D1QNB3ozqZFmKQrJunLFuJn3iSEv6k-AZeBA3xHZ-8Wa%26wd%3D%26eqid%3Ded55ec7e00044464000000035f61627d%22%5D; _pk_ses=*; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/16/2020 17:27:44; LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; c_m_expire=2020-09-16 17:27:44; Ecp_notFirstLogin=Gr0r31; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"Gr0r31"}'
refence_file = sess.get(self.download_url, headers=HEADER)
with open('data/PDFs/' + filename, 'wb') as file:
file.write(refence_file.content)
# refence_file = requests.get(self.download_url,headers=HEADER)
# with open('data/CAJs/' + filename , 'wb') as file:
# file.write(refence_file.content)
# print(self.download_url)
# refence_file =sess.get(self.download_url,headers=HEADER)
# htmls = refence_file.text
# soup = BeautifulSoup(htmls, 'lxml')
# print(soup.find_all(('img')))
# if len(soup.find_all('img'))>0:
#
# validCodeSubSrc = soup.find_all('img')[0]['src']
#
# code=crack.get_image2(validCodeSubSrc, self.session)
#
# HEADER['Referer'] = self.download_url
#
# payload = "vcode=" + code
# ret = sess.post(self.download_url, data=payload)
# print(ret)
except Exception as e:
logging.error(e)
logging.error('下载出错')
time.sleep(config.crawl_stepWaitTime)
'''Move the downloaded files to the target directory'''
def move_file(self,src_dir, target_dir,args):
args["CrawProcess"].emit("正在移动文件")
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for item in os.listdir(src_dir):
src_name = os.path.join(src_dir, item)
target_name = os.path.join(target_dir, item)
shutil.move(src_name, target_name)
args["CrawProcess"].emit("文件移动完成,爬取完成")
def s2h(self,seconds):
'''
Convert a number of seconds into an hours/minutes/seconds string
'''
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return ("%02d小时%02d分钟%02d秒" % (h, m, s))
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_notFirstLogin=5BEo2M; Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27); CurrSortFieldType=desc; SID_kcms=124108; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:11:55; c_m_expire=2020-09-14 10:11:55
# https://kns.cnki.net/kcms/download.aspx?filename=w5WUJNFV5pmdrlTbJp3SaNXa09Gbr4GWLZGOLVkcotyYNBDVl9WVyRHTxFnVzRHSuV2LWxkei9mbyhVUwVmdNxUanZ0d1VHZYVUQpJzZYJ1QEdWekx2cwJ3dyFjcxEzQitGWNhnQzoGNptSaj9yaNJ0NDdGMCllU&tablename=CAPJLAST&dflag=cajdown
| (time.time()))
self.index = 0
self.cur_page_num = 1
# keep the session alive
self.session.get(BASIC_URL, headers=HEADER)
self.count=count
def get_cookies(self):
# self.webdriver_path = "D:\\workspaces\\pythonworks\\webdriver\\chromedriver_win32 | identifier_body |
main.py | # -*- coding: UTF-8 -*-
import urllib
import json
import requests
import re
import time, os, shutil, logging
from .GetConfig import config
from .CrackVerifyCode import crack
from .GetPageDetail import page_detail
# 引入字节编码
from urllib.parse import quote
# 引入beautifulsoup
from bs4 import BeautifulSoup
import shutil
from selenium import webdriver
from time import sleep
from selenium.webdriver.chrome.options import Options
from requests.cookies import RequestsCookieJar
from urllib.parse import quote_plus, urlencode
from http import cookiejar
HEADER = config.crawl_headers
# 获取cookie
BASIC_URL = 'https://kns.cnki.net/kns/brief/result.aspx'
# 利用post请求先行注册一次
SEARCH_HANDLE_URL = 'https://kns.cnki.net/kns/request/SearchHandler.ashx'
# 发送get请求获得文献资源
GET_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx?pagename='
# 下载的基础链接
DOWNLOAD_URL = 'https://kdoc.cnki.net/kdoc/'
# 切换页面基础链接
CHANGE_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx'
class SearchTools(object):
'''
Search helper class;
implements the search workflow
'''
def __init__(self,count):
self.session = requests.Session()
self.sheet_name = "CRA" + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
self.index = 0
self.cur_page_num = 1
# keep the session alive
self.session.get(BASIC_URL, headers=HEADER)
self.count=count
def get_cookies(self):
# self.webdriver_path = "D:\\workspaces\\pythonworks\\webdriver\\chromedriver_win32\\chromedriver.exe"
self.webdriver_path = "D:\\chromedriver.exe"
# self.webdriver_path = "D:\\安装包\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe"
# options = webdriver.ChromeOptions()
chrome_options = Options()
# options1 = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# options1.add_experimental_option('excludeSwitches', ['enable-logging'])
# driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options, options=options1)
# driver = webdriver.PhantomJS(executable_path=self.webdriver_path)
driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options)
# driver = webdriver.Chrome(self.webdriver_path)
driver.get("https://www.cnki.net/")
driver.find_element_by_id("txt_SearchText").click()
sleep(2)
driver.find_element_by_id("txt_SearchText").send_keys("机器学习")
sleep(1)
element = driver.find_element_by_class_name("search-btn")
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
driver.find_element_by_class_name("search-btn").click()
sleep(1)
coo = driver.get_cookies()
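# Selenium only runs once to obtain a valid cookie set; the cookie string assembled below is replayed later via HEADER['Cookie'] by the plain requests session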
cookies = {}
self.ck = str()
# pull the name/value pairs out of the cookies and convert them into a form that requests can use
for cookie in coo: | self.ck = self.ck + cookie['name'] + '=' + cookie['value'] + ';'
# print(cookie['name'] + '=' + cookie['value'] + ';')
return self.ck
def search_reference(self, ueser_input,args):
'''
Send a POST request first,
then send a GET request (this one carries no reference data);
the two requests together return the reference list
'''
if os.path.isdir('data'):
# 递归删除文件
shutil.rmtree('data')
# 创建一个空的
os.mkdir('data')
'''When DbPrefix is CFLS only Chinese records are downloaded; SCDB returns Chinese and English (the English entries have no download link)'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_result_aspx',
'DbPrefix': 'CJFQ',
'DbCatalog': '中国学术期刊网络出版总库',
# 'ConfigFile': 'SCDB.xml',
'ConfigFile': 'CJFQ.xml',
'db_opt': 'CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD', # 搜索类别(CNKI右侧的)
'his': '0',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
# 将固定字段与自定义字段组合
post_data = {**static_post_data, **ueser_input}
try:
self.get_cookies()
except Exception as e:
print(e)
print("cookie获取失败")
# 必须有第一次请求,否则会提示服务器没有用户
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER)
# get请求中需要传入第一个检索条件的值
key_value = quote(ueser_input.get('txt_1_value1'))
# print("first_post_res:",first_post_res.text)
# print("key_value:",key_value)
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
# 检索结果的第一个页面
second_get_res = self.session.get(self.get_result_url,headers=HEADER)
# cookies = second_get_res.cookies
# cookie = requests.utils.dict_from_cookiejar(cookies)
# print(cookie)
# print(second_get_res.text)
# second_get_res = self.session.get(SEARCH_HANDLE_URL, data=post_data,headers=HEADER)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
print(self.change_page_url)
try:
self.parse_page(
self.pre_parse_page(second_get_res.text), second_get_res.text,args)
except Exception as e:
print(e)
except Exception as e:
print(e)
# pass
# self.parse_page(
# self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
Let the user choose how many result pages to crawl
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
# is_all_download = input('是否要全部下载(y/n)?')
is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
Save the page information
Parse the download address of every entry on each page
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# 处理验证码
try:
# 去除第一个tr标签(表头)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# 因为一列中的信息非常杂乱,此处进行二次拼接
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# 将每一篇文献的信息分组
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# 是否开启详情页数据抓取
if config.crawl_isdetail ==1:
time.sleep(config.crawl_stepWaitTime)
if len(self.download_url)>40:
page_detail.get_detail_page(self.session, self.get_result_url,
detail_url, single_refence_list,
self.download_url,self.docid, gettype)
# with open('data/ReferenceList.txt', 'a', encoding='utf-8') as file:
# file.write('\n')
else:
logging.error("无下载链接")
# time.sleep(0.5)
else:
args["CrawProcess"].emit('爬取结束')
print("结束爬取,退出")
break
# exit()
except OSError:
pass
# download_page_left为剩余等待遍历页面
if download_page_left > 1:
self.cur_page_num += 1
self.get_another_page(download_page_left,args)
def get_another_page(self, download_page_left,args):
'''
Requests for the remaining pages differ from the first request,
so the request is rebuilt here
'''
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
get_res = self.session.get(self.get_result_url, headers=HEADER)
download_page_left -= 1
self.parse_page(download_page_left, get_res.text,args)
def download_refence(self,url, single_refence_list,args):
'''
Build the download address
and download the reference
'''
gettype = "pdf"
# gettype = args['type']
# build the download address ('\.\./' is escaped so the dots are taken literally)
self.download_url = DOWNLOAD_URL + re.sub(r'\.\./', '', url)
# print("url---------------", self.download_url)
if len(self.download_url) > 40:
args['count']+=1
self.pg="正在下载第%s/%s篇文献"%(args['count'],str(self.select_download_num))
self.info='节点1_正在下载: ' + single_refence_list[1] + '.' + gettype
args["CrawProcess"].emit(str(self.pg+"\n"+self.info))
# print(type(args["CrawProcess"]))
name = single_refence_list[1]
# name = single_refence_list[1] + '_' + single_refence_list[2]
'''Sanitize the file name: the site may use characters that cannot be saved in a local file name'''
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
# with open('data/Links.txt', 'a', encoding='utf-8') as file:
# file.write(self.download_url + '\n')
# if config.crawl_isdownload ==1:
if not os.path.isdir('data/PDFs'):
os.mkdir(r'data/PDFs')
# filename = self.docid+name+".pdf"
filename = self.docid+name+"." + gettype
try:
if not os.path.isfile(os.path.join("data/PDFs/", filename)):
sess = requests.Session()
HEADER['Referer'] = self.download_url
# HEADER['Cookie'] = 'LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwxSkpTdzNSelZPMGtUTTR3djg1QT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'CurrSortFieldType=desc;CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27);c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 15:04:56;cnkiUserKey=80843df4-4597-8109-17a3-f4f7642134c4;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"fC3r2l"};c_m_expire=2020-09-15 15:04:56;SID_kns8=123112;Ecp_session=1;ASP.NET_SessionId=cdwbc4sppmhjofebxlgpbbp4;SID_kns_new=kns123121;Ecp_ClientId=5200915144402179584;Ecp_notFirstLogin=fC3r2l;LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 16:25:29;cnkiUserKey=700c6580-66f0-d89f-414c-c84f72dc52fa;c_m_expire=2020-09-15 16:25:29;SID_kns8=123106;ASP.NET_SessionId=qag4isl11jbdrt0mjunnyvjr;SID_kns_new=kns123117;Ecp_ClientId=1200915160502413634;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"rptZbY"};Ecp_notFirstLogin=rptZbY;LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;Ecp_session=1;'
HEADER['Cookie'] = self.ck
# HEADER['Cookie'] = 'Ecp_ClientId=1200824163400713266; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; SID=zhuye006; Ecp_session=1; _pk_ref=%5B%22%22%2C%22%22%2C1600247285%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D1QNB3ozqZFmKQrJunLFuJn3iSEv6k-AZeBA3xHZ-8Wa%26wd%3D%26eqid%3Ded55ec7e00044464000000035f61627d%22%5D; _pk_ses=*; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/16/2020 17:27:44; LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; c_m_expire=2020-09-16 17:27:44; Ecp_notFirstLogin=Gr0r31; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"Gr0r31"}'
refence_file = sess.get(self.download_url, headers=HEADER)
with open('data/PDFs/' + filename, 'wb') as file:
file.write(refence_file.content)
# refence_file = requests.get(self.download_url,headers=HEADER)
# with open('data/CAJs/' + filename , 'wb') as file:
# file.write(refence_file.content)
# print(self.download_url)
# refence_file =sess.get(self.download_url,headers=HEADER)
# htmls = refence_file.text
# soup = BeautifulSoup(htmls, 'lxml')
# print(soup.find_all(('img')))
# if len(soup.find_all('img'))>0:
#
# validCodeSubSrc = soup.find_all('img')[0]['src']
#
# code=crack.get_image2(validCodeSubSrc, self.session)
#
# HEADER['Referer'] = self.download_url
#
# payload = "vcode=" + code
# ret = sess.post(self.download_url, data=payload)
# print(ret)
except Exception as e:
logging.error(e)
logging.error('下载出错')
time.sleep(config.crawl_stepWaitTime)
'''Move the downloaded files to the target directory'''
def move_file(self,src_dir, target_dir,args):
args["CrawProcess"].emit("正在移动文件")
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for item in os.listdir(src_dir):
src_name = os.path.join(src_dir, item)
target_name = os.path.join(target_dir, item)
shutil.move(src_name, target_name)
args["CrawProcess"].emit("文件移动完成,爬取完成")
def s2h(self,seconds):
'''
Convert a number of seconds into an hours/minutes/seconds string
'''
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return ("%02d小时%02d分钟%02d秒" % (h, m, s))
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_notFirstLogin=5BEo2M; Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27); CurrSortFieldType=desc; SID_kcms=124108; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:11:55; c_m_expire=2020-09-14 10:11:55
# https://kns.cnki.net/kcms/download.aspx?filename=w5WUJNFV5pmdrlTbJp3SaNXa09Gbr4GWLZGOLVkcotyYNBDVl9WVyRHTxFnVzRHSuV2LWxkei9mbyhVUwVmdNxUanZ0d1VHZYVUQpJzZYJ1QEdWekx2cwJ3dyFjcxEzQitGWNhnQzoGNptSaj9yaNJ0NDdGMCllU&tablename=CAPJLAST&dflag=cajdown | cookies[cookie['name']] = cookie['value'] | random_line_split |
main.py | # -*- coding: UTF-8 -*-
import urllib
import json
import requests
import re
import time, os, shutil, logging
from .GetConfig import config
from .CrackVerifyCode import crack
from .GetPageDetail import page_detail
# 引入字节编码
from urllib.parse import quote
# 引入beautifulsoup
from bs4 import BeautifulSoup
import shutil
from selenium import webdriver
from time import sleep
from selenium.webdriver.chrome.options import Options
from requests.cookies import RequestsCookieJar
from urllib.parse import quote_plus, urlencode
from http import cookiejar
HEADER = config.crawl_headers
# 获取cookie
BASIC_URL = 'https://kns.cnki.net/kns/brief/result.aspx'
# 利用post请求先行注册一次
SEARCH_HANDLE_URL = 'https://kns.cnki.net/kns/request/SearchHandler.ashx'
# 发送get请求获得文献资源
GET_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx?pagename='
# 下载的基础链接
DOWNLOAD_URL = 'https://kdoc.cnki.net/kdoc/'
# 切换页面基础链接
CHANGE_PAGE_URL = 'https://kns.cnki.net/kns/brief/brief.aspx'
class SearchTools(object):
'''
Search helper class;
implements the search workflow
'''
def __init__(self,count):
self.session = requests.Session()
self.sheet_name = "CRA" + time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
self.index = 0
self.cur_page_num = 1
# keep the session alive
self.session.get(BASIC_URL, headers=HEADER)
self.count=count
def get_cookies(self):
# self.webdriver_path = "D:\\workspaces\\pythonworks\\webdriver\\chromedriver_win32\\chromedriver.exe"
self.webdriver_path = "D:\\chromedriver.exe"
# self.webdriver_path = "D:\\安装包\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe"
# options = webdriver.ChromeOptions()
chrome_options = Options()
# options1 = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# options1.add_experimental_option('excludeSwitches', ['enable-logging'])
# driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options, options=options1)
# driver = webdriver.PhantomJS(executable_path=self.webdriver_path)
driver = webdriver.Chrome(executable_path=self.webdriver_path, chrome_options=chrome_options)
# driver = webdriver.Chrome(self.webdriver_path)
driver.get("https://www.cnki.net/")
driver.find_element_by_id("txt_SearchText").click()
sleep(2)
driver.find_element_by_id("txt_SearchText").send_keys("机器学习")
sleep(1)
element = driver.find_element_by_class_name("search-btn")
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
driver.find_element_by_class_name("search-btn").click()
sleep(1)
coo = driver.get_cookies()
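# Selenium only runs once to obtain a valid cookie set; the cookie string assembled below is replayed later via HEADER['Cookie'] by the plain requests session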
cookies = {}
self.ck = str()
# pull the name/value pairs out of the cookies and convert them into a form that requests can use
for cookie in coo:
cookies[cookie['name']] = cookie['value']
self.ck = self.ck + cookie['name'] + '=' + cookie['value'] + ';'
# print(cookie['name'] + '=' + cookie['value'] + ';')
return self.ck
def search_reference(self, ueser_input,args):
'''
Send a POST request first,
then send a GET request (this one carries no reference data);
the two requests together return the reference list
'''
if os.path.isdir('data'):
# 递归删除文件
shutil.rmtree('data')
# 创建一个空的
os.mkdir('data')
'''When DbPrefix is CFLS only Chinese records are downloaded; SCDB returns Chinese and English (the English entries have no download link)'''
static_post_data = {
'action': '',
'NaviCode': '*',
'ua': '1.21',
'isinEn': '1',
'PageName': 'ASP.brief_result_aspx',
'DbPrefix': 'CJFQ',
'DbCatalog': '中国学术期刊网络出版总库',
# 'ConfigFile': 'SCDB.xml',
'ConfigFile': 'CJFQ.xml',
'db_opt': 'CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD', # 搜索类别(CNKI右侧的)
'his': '0',
'__': time.asctime(time.localtime()) + ' GMT+0800 (中国标准时间)'
}
# 将固定字段与自定义字段组合
post_data = {**static_post_data, **ueser_input}
try:
self.get_cookies()
except Exception as e:
print(e)
print("cookie获取失败")
# 必须有第一次请求,否则会提示服务器没有用户
first_post_res = self.session.post(
SEARCH_HANDLE_URL, data=post_data, headers=HEADER)
# get请求中需要传入第一个检索条件的值
key_value = quote(ueser_input.get('txt_1_value1'))
# print("first_post_res:",first_post_res.text)
# print("key_value:",key_value)
self.get_result_url = GET_PAGE_URL + first_post_res.text + '&t=1544249384932&keyValue=' + key_value + '&S=1&sorttype='
# 检索结果的第一个页面
second_get_res = self.session.get(self.get_result_url,headers=HEADER)
# cookies = second_get_res.cookies
# cookie = requests.utils.dict_from_cookiejar(cookies)
# print(cookie)
# print(second_get_res.text)
# second_get_res = self.session.get(SEARCH_HANDLE_URL, data=post_data,headers=HEADER)
change_page_pattern_compile = re.compile(
r'.*?pagerTitleCell.*?<a href="(.*?)".*')
try:
self.change_page_url = re.search(change_page_pattern_compile,
second_get_res.text).group(1)
print(self.change_page_url)
try:
self.parse_page(
self.pre_parse_page(second_get_res.text), second_get_res.text,args)
except Exception as e:
print(e)
except Exception as e:
print(e)
# pass
# self.parse_page(
# self.pre_parse_page(second_get_res.text), second_get_res.text,args)
def pre_parse_page(self, page_source):
'''
Let the user choose how many result pages to crawl
'''
reference_num_pattern_compile = re.compile(r'.*?找到 (.*?) ')
reference_num = re.search(reference_num_pattern_compile,
page_source).group(1)
reference_num_int = int(reference_num.replace(',', ''))
print('检索到' + reference_num + '条结果,全部下载大约需要' +
self.s2h(reference_num_int * 5) + '。')
# is_all_download = input('是否要全部下载(y/n)?')
is_all_download = 'n'
# 将所有数量根据每页20计算多少页
if is_all_download == 'y':
page, i = divmod(reference_num_int, 20)
if i != 0:
page += 1
return page
else:
count = self.count
self.select_download_num = int(count)
while True:
if self.select_download_num > reference_num_int:
print('输入数量大于检索结果,请重新输入!')
self.select_download_num = int(input('请输入需要下载的数量(不满一页将下载整页):'))
else:
page, i = divmod(self.select_download_num, 20)
# 不满一页的下载一整页
if i != 0:
page += 1
print("开始下载前%d页所有文件,预计用时%s" % (page, self.s2h(page * 20 * 5)))
print('--------------------------')
return page
def parse_page(self, download_page_left, page_source,args):
'''
Save the page information
Parse the download address of every entry on each page
'''
# gettype = args['type']
gettype = "pdf"
soup = BeautifulSoup(page_source, 'lxml')
# 定位到内容表区域
tr_table = soup.find(name='table', attrs={'class': 'GridTableContent'})
# 处理验证码
try:
# 去除第一个tr标签(表头)
tr_table.tr.extract()
except Exception as e:
logging.error('出现验证码')
return self.parse_page(
download_page_left,
crack.get_image(self.get_result_url, self.session,
page_source),args)
# 遍历每一行
for index, tr_info in enumerate(tr_table.find_all(name='tr')):
tr_text = ''
download_url = ''
detail_url = ''
# 遍历每一列
for index, td_info in enumerate(tr_info.find_all(name='td')):
# 因为一列中的信息非常杂乱,此处进行二次拼接
td_text = ''
for string in td_info.stripped_strings:
td_text += string
tr_text += td_text + ' '
# with open(
# 'data/ReferenceList.txt', 'a',
# encoding='utf-8') as file:
# file.write(td_text + ' ')
# 寻找下载链接
dl_url = td_info.find('a', attrs={'class': 'briefDl_D'})
# 寻找详情链接
dt_url = td_info.find('a', attrs={'class': 'fz14'})
# 排除不是所需要的列
if dt_url:
detail_url = dt_url.attrs['href']
if dl_url:
# download_url = dl_url.attrs['href']+"&dflag=pdfdown"
# download_url = dl_url.attrs['href']+"&dflag=cajdown"
download_url = dl_url.attrs['href']+"&dflag="+ gettype +"down"
# download_url = dl_url.attrs['href']
try:
# 将每一篇文献的信息分组
single_refence_list = tr_text.split(' ')
if args["flag"] == True:
self.index += 1
self.docid = self.sheet_name + str(self.index).zfill(4)
self.download_refence(download_url, single_refence_list,args)
# 是否开启详情页数据抓取
if config.crawl_isdetail ==1:
time.sleep(config.crawl_stepWaitTime)
if len(self.download_url)>40:
page_detail.get_detail_page(self.session, self.get_result_url,
detail_url, single_refence_list,
self.download_url,self.docid, gettype)
# with open('data/ReferenceList.txt', 'a', encoding='utf-8') as file:
# file.write('\n')
else:
logging.error("无下载链接")
# time.sleep(0.5)
else:
args["CrawProcess"].emit('爬取结束')
print("结束爬取,退出")
break
# exit()
except OSError:
pass
# download_page_left为剩余等待遍历页面
if download_page_left > 1:
self.cur_page_num += 1
self.get_another_page(download_page_left,args)
def get_another_page(self, download_page_left,args):
'''
Requests for the remaining pages differ from the first request,
so the request is rebuilt here
'''
time.sleep(config.crawl_stepWaitTime)
curpage_pattern_compile = re.compile(r'.*?curpage=(\d+).*?')
self.get_result_url = CHANGE_PAGE_URL + re.sub(
curpage_pattern_compile, '?curpage=' + str(self.cur_page_num),
self.change_page_url)
get_res = self.session.get(self.get_result_url, headers=HEADER)
download_page_left -= 1
self.parse_page(download_page_left, get_res.text,args)
def download_refence(self,url, single_refence_list,args):
'''
Build the download address
and download the reference
'''
gettype = "pdf"
# gettype = args['type']
# build the download address ('\.\./' is escaped so the dots are taken literally)
self.download_url = DOWNLOAD_URL + re.sub(r'\.\./', '', url)
# print("url---------------", self.download_url)
if len(self.download_url) > 40:
args['count']+=1
self.pg="正在下载第%s/%s篇文献"%(args['count'],str(self.select_download_num))
self.info='节点1_正在下载: ' + single_refence_list[1] + '.' + gettype
args["CrawProcess"].emit(str(self.pg+"\n"+self.info))
# print(type(args["CrawProcess"]))
name = single_refence_list[1]
# name = single_refence_list[1] + '_' + single_refence_list[2]
'''Sanitize the file name: the site may use characters that cannot be saved in a local file name'''
file_pattern_compile = re.compile(r'[\\/:\*\?"<>\|]')
name = re.sub(file_pattern_compile, '', name)
# with open('data/Links.txt', 'a', encoding='utf-8') as file:
# file.write(self.download_url + '\n')
# if config.crawl_isdownload ==1:
if not os.path.isdir('data/PDFs'):
os.mkdir(r'data/PDFs')
# filename = self.docid+name+".pdf"
filename = self.docid+name+"." + gettype
try:
if not os.path.isfile(os.path.join("data/PDFs/", filename)):
sess = requests.Session()
HEADER['Referer'] = self.download_url
# HEADER['Cookie'] = 'LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwxSkpTdzNSelZPMGtUTTR3djg1QT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'CurrSortFieldType=desc;CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27);c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 15:04:56;cnkiUserKey=80843df4-4597-8109-17a3-f4f7642134c4;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"fC3r2l"};c_m_expire=2020-09-15 15:04:56;SID_kns8=123112;Ecp_session=1;ASP.NET_SessionId=cdwbc4sppmhjofebxlgpbbp4;SID_kns_new=kns123121;Ecp_ClientId=5200915144402179584;Ecp_notFirstLogin=fC3r2l;LID=WEEvREcwSlJHSldSdmVqelcxVTNETUwwTExCbEZsQXRxTzRsVnpSSVpvTT0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;'
# HEADER['Cookie'] = 'c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/15/2020 16:25:29;cnkiUserKey=700c6580-66f0-d89f-414c-c84f72dc52fa;c_m_expire=2020-09-15 16:25:29;SID_kns8=123106;ASP.NET_SessionId=qag4isl11jbdrt0mjunnyvjr;SID_kns_new=kns123117;Ecp_ClientId=1200915160502413634;Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"rptZbY"};Ecp_notFirstLogin=rptZbY;LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVRTFY0MHlhNld6cXdxem9kRXpzcz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!;Ecp_session=1;'
HEADER['Cookie'] = self.ck
# HEADER['Cookie'] = 'Ecp_ClientId=1200824163400713266; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; SID=zhuye006; Ecp_session=1; _pk_ref=%5B%22%22%2C%22%22%2C1600247285%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3D1QNB3ozqZFmKQrJunLFuJn3iSEv6k-AZeBA3xHZ-8Wa%26wd%3D%26eqid%3Ded55ec7e00044464000000035f61627d%22%5D; _pk_ses=*; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/16/2020 17:27:44; LID=WEEvREcwSlJHSldSdmVqM1BLUWdMWjVUaFVEOGJ4TldxYkF6bEU4anQzZz0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; c_m_expire=2020-09-16 17:27:44; Ecp_notFirstLogin=Gr0r31; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"Gr0r31"}'
refence_file = sess.get(self.download_url, headers=HEADER)
with open('data/PDFs/' + filename, 'wb') as file:
file.write(refence_file.content)
# refence_file = requests.get(self.download_url,headers=HEADER)
# with open('data/CAJs/' + filename , 'wb') as file:
# file.write(refence_file.content)
# print(self.download_url)
# refence_file =sess.get(self.download_url,headers=HEADER)
# htmls = refence_file.text
# soup = BeautifulSoup(htmls, 'lxml')
# print(soup.find_all(('img')))
# if len(soup.find_all('img'))>0:
#
# validCodeSubSrc = soup.find_all('img')[0]['src']
#
# code=crack.get_image2(validCodeSubSrc, self.session)
#
# HEADER['Referer'] = self.download_url
#
# payload = "vcode=" + code
# ret = sess.post(self.download_url, data=payload)
# print(ret)
except Exception as e:
logging.error(e)
logging.error('下载出错')
time.sleep(config.crawl_stepWaitTime)
'''Move the downloaded files to the target directory'''
def move_file(self,src_dir, target_dir,args):
args["CrawProcess"].emit("正在移动文件")
if not os.path.exists(target_dir):
os.mkdir(target_dir)
for item in os.listdir(src_dir):
src_name = os.path.join(src_dir, item)
target_name = os.path.join(target_dir, item)
shutil.move(src_name, target_name)
args["CrawProcess"].emit("文件移动完成,爬取完成")
def s2h(self,seconds):
'''
Convert a number of seconds into an hours/minutes/seconds string
'''
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return ("%02d小时%02d分钟%02d秒" % (h, m, s))
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C15 | KensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:08:51; c_m_expire=2020-09-14 10:08:51
# Ecp_notFirstLogin=5BEo2M; Ecp_ClientId=1200824163400713266; RsPerPage=20; cnkiUserKey=3bc189b4-1612-5130-3b53-e91d7f426804; _pk_ref=%5B%22%22%2C%22%22%2C1599961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!; Ecp_session=1; Ecp_LoginStuts={"IsAutoLogin":false,"UserName":"NJ0023","ShowName":"%E6%B2%B3%E6%B5%B7%E5%A4%A7%E5%AD%A6","UserType":"bk","BUserName":"","BShowName":"","BUserType":"","r":"5BEo2M"}; ASP.NET_SessionId=xer0y025pdahbeg1pdbooazq; SID_kns8=123110; CurrSortField=%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2f(%e5%8f%91%e8%a1%a8%e6%97%b6%e9%97%b4%2c%27TIME%27); CurrSortFieldType=desc; SID_kcms=124108; c_m_LinID=LinID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEjKensW4IQMovwHtwkF4VYPoHbKxJw!!&ot=09/14/2020 10:11:55; c_m_expire=2020-09-14 10:11:55
# https://kns.cnki.net/kcms/download.aspx?filename=w5WUJNFV5pmdrlTbJp3SaNXa09Gbr4GWLZGOLVkcotyYNBDVl9WVyRHTxFnVzRHSuV2LWxkei9mbyhVUwVmdNxUanZ0d1VHZYVUQpJzZYJ1QEdWekx2cwJ3dyFjcxEzQitGWNhnQzoGNptSaj9yaNJ0NDdGMCllU&tablename=CAPJLAST&dflag=cajdown
| 99961800%2C%22https%3A%2F%2Fwww.cnki.net%2F%22%5D; LID=WEEvREcwSlJHSldSdmVqMDh6aS9uaHNiSkpvbExySllXaCs1MkpUR1NCST0=$9A4hF_YAuvQ5obgVAqNKPCYcEj | conditional_block |
createKB.py | import kindred
import argparse
from collections import defaultdict
import sys
import json
import re
import utils
import gzip
def isASCII(s):
#try:
# s.decode('ascii')
# return True
#except UnicodeDecodeError:
# return False
|
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build classifiers and create the PGmine knowledge base')
parser.add_argument('--trainingFiles',required=True,type=str,help='2 BioC files (comma-separated) for star_rs and other')
parser.add_argument('--selectedChemicals',required=True,type=str,help='Which chemicals to filter for')
parser.add_argument('--dbsnp',required=True,type=str,help='File containing mappings from dbSNP IDs to genes')
parser.add_argument('--genes',required=True,type=str,help='File containing gene data')
parser.add_argument('--variantStopwords',required=True,type=str,help='Variants to remove')
parser.add_argument('--relevantMeSH',required=True,type=str,help='File with MeSH term mappings')
parser.add_argument('--inBioC',required=True,type=str,help='BioC file to predict things on')
parser.add_argument('--outKB',required=True,type=str,help='TSV file for KB')
args = parser.parse_args()
chemMeshIDs = set()
meshIDsToChemName,meshIDsToPharmGKB,meshIDsToDrugBank = {},{},{}
cancerDrugMeshIDs = set()
with open(args.selectedChemicals) as f:
chemData = json.load(f)
for chem in chemData:
for meshID in chem['MeSH']:
meshID = 'MESH:'+meshID
chemMeshIDs.add(meshID)
if chem['isCancerDrug']:
cancerDrugMeshIDs.add(meshID)
meshIDsToChemName[meshID] = chem['name']
meshIDsToDrugBank[meshID] = chem['DrugBank']
if 'PharmGKB' in chem:
meshIDsToPharmGKB[meshID] = chem['PharmGKB']
dbsnp = {}
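# expected format of the dbSNP file: rsID <TAB> geneName1:geneID1|geneName2:geneID2|...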
with open(args.dbsnp) as f:
for line in f:
rsid,geneInfos = line.strip('\n').split('\t')
geneInfos = [ tuple(geneInfo.split(':')) for geneInfo in geneInfos.split('|') ]
geneNames = [ geneName for geneName,geneID in geneInfos ]
geneIDs = [ geneID for geneName,geneID in geneInfos ]
dbsnp[rsid] = (geneNames,geneIDs)
with open(args.variantStopwords) as f:
variantStopwords = set( line.strip().lower() for line in f )
geneID2Name = {}
proteinCodingGenes = set()
with open(args.genes) as f:
for line in f:
entrezID,name,geneType = line.strip('\n').split('\t')
geneID2Name[entrezID] = name
if geneType == 'protein-coding':
proteinCodingGenes.add(entrezID)
print("Loaded chemical, gene and variant data")
pediatricTerms = set(['Pediatrics','Infant','Infant, Newborn','Child','Child, Preschool','Adolescent','Birth Cohort'])
adultTerms = set(['Adult','Aged','Middle Aged','Young Adult'])
with gzip.open(args.relevantMeSH,'rt') as f:
relevantMeSH = json.load(f)
pediatricPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in pediatricTerms for t in terms ) )
adultPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in adultTerms for t in terms ) )
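# keep, for each PMID, only the age-related MeSH terms; they are written out later as the age_mesh_tags column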
pmidToRelevantMeSH = { int(pmid):[t for t in terms if t in pediatricTerms or t in adultTerms] for pmid,terms in relevantMeSH.items() }
print("Loaded mesh PMIDs for pediatric/adult terms")
# Fix mapping of some popular variants to the correct SNP
variantFixes = {
'rs121913377':'rs113488022' # BRAF V600E
}
modes = ["star_rs","other"]
trainingFiles = args.trainingFiles.split(',')
assert len(trainingFiles) == 2, "Must provide 2 files (comma-separated) for star_rs and other"
hlaGeneIDs = {"3105","3106","3107","3108","3109","3111","3112","3113","3115","3117","3118","3119","3120","3121","3122","3123","3125","3126","3127","3133","3134","3135"}
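# drug/allele pairings that are frequently co-mentioned but swapped (abacavir belongs with HLA-B*57:01, carbamazepine with HLA-B*15:02, allopurinol with HLA-B*58:01); these wrong pairs are filtered out below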
obviousMistakes = {('Abacavir','HLA-B*15:02'),('Allopurinol','HLA-B*15:02'),('Carbamazepine','HLA-B*57:01'),('Allopurinol','HLA-B*57:01'),('Carbamazepine','HLA-B*58:01'),('Abacavir','HLA-B*58:01')}
chemicalExclusions = {'cc and tc', 'cc+tc', 'cc + tc','whitehall ii','rvoto','lev-pae','oxaipn','luminal b','oxc-mpe','rapid stemi','vp40e'}
headers = ['pmid','title','journal','journal_short','year','month','day','is_pediatric_paper','is_adult_paper','age_mesh_tags','section','subsection','chemical_mesh_id','chemical_pharmgkb_id','chemical_drugbank_id','chemical_text','chemical_normalized','chemical_position','variant_id','variant_type','variant_text','variant_normalized','variant_position','gene_ids','gene_names','score','sentence','formatted_sentence']
with open(args.outKB,'w') as outF:
outF.write("\t".join(headers) + "\n")
for mode,trainingData in zip(modes,trainingFiles):
print("Creating classifier for %s" % mode)
predictedCount = 0
trainCorpus = kindred.load('biocxml',trainingData)
corpus = kindred.load('biocxml',args.inBioC)
for doc in trainCorpus.documents:
for relation in doc.relations:
relation.relationType = 'ChemicalMutation'
for doc in corpus.documents:
if mode == 'star_allele':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('*')) ]
elif mode == 'rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('rs')) ]
elif mode == 'star_rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not (e.text.strip().startswith('rs') or e.text.strip().startswith('*'))) ]
else:
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and (e.text.strip().startswith('*') or e.text.strip().startswith('rs'))) ]
doc.entities = [ e for e in doc.entities if e.position[0][0] >= 0 ]
doc.entities = [ e for e in doc.entities if e.position[0][1] <= len(doc.text) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and not e.metadata['conceptid'] in chemMeshIDs) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and len(e.text) <= 4) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and utils.normalizeMutation(e.text) is None) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and e.text.lower() in variantStopwords) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and re.match("^[A-Z][\[\]\-\(\)\d]*[A-Z]$",e.text) ) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and e.text.lower() in chemicalExclusions) ]
parser = kindred.Parser(model='en_core_sci_sm')
parser.parse(trainCorpus)
parser.parse(corpus)
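# logistic-regression relation classifier restricted to (Chemical, Mutation) pairs; predicted relations need a probability above the 0.5 threshold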
chemicalVariantClassifier = kindred.RelationClassifier(classifierType='LogisticRegression',threshold=0.5,acceptedEntityTypes=[('Chemical','Mutation')])
chemicalVariantClassifier.train(trainCorpus)
chemicalVariantClassifier.predict(corpus)
for doc in corpus.documents:
pmid = doc.metadata['pmid']
title = doc.metadata['title']
journal = doc.metadata['journal']
year = doc.metadata['year']
month = doc.metadata['month']
day = doc.metadata['day']
section = doc.metadata['section']
is_pediatric_paper = pmid and int(pmid) in pediatricPMIDs
is_adult_paper = pmid and int(pmid) in adultPMIDs
age_mesh_tags = "|".join(pmidToRelevantMeSH[int(pmid)]) if int(pmid) in pmidToRelevantMeSH else ""
journal_short = journal
if len(journal_short) > 50:
journal_short = journal_short[:50] + '...'
if 'subsection' in doc.metadata:
subsection = doc.metadata['subsection'].replace('’',"'")
elif doc.metadata['section'] == 'abstract':
subsection = 'abstract'
elif doc.metadata['section'] == 'title':
subsection = 'title'
if subsection == 'None':
subsection = 'unknown'
groups = defaultdict(set)
scores = defaultdict(lambda : 0)
# Group chemical/variant pairs so that we don't create redundant relations describing the same association when the same chemical or variant appears multiple times in the sentence
chemicalVariantRelations = [ r for r in doc.relations if r.relationType == 'ChemicalMutation' ]
for chemicalVariant in chemicalVariantRelations:
chemical,variant = chemicalVariant.entities
chemical_mesh_id = chemical.metadata['conceptid']
variant_concept_id = variant.metadata['conceptid']
if ';' in chemical_mesh_id:
continue
elif ';' in variant_concept_id:
continue
key = (chemical_mesh_id,variant_concept_id)
groups[key].add(chemical)
groups[key].add(variant)
scores[key] = max(scores[key],chemicalVariant.probability)
for key,chemicalVariants in groups.items():
score = scores[key]
# Sort by location in sentence
chemicalVariants = sorted(chemicalVariants, key = lambda x: x.position[0] )
chemicals = [ e for e in chemicalVariants if e.entityType == 'Chemical' ]
variants = [ e for e in chemicalVariants if e.entityType == 'Mutation' ]
chemical,variant = chemicals[0],variants[0]
chemical_text = chemical.text
chemical_mesh_id = chemical.metadata['conceptid']
chemical_pharmgkb_id = meshIDsToPharmGKB[chemical_mesh_id] if chemical_mesh_id in meshIDsToPharmGKB else 'NA'
chemical_normalized = meshIDsToChemName[chemical_mesh_id]
chemical_drugbank_id = meshIDsToDrugBank[chemical_mesh_id]
# Remap statins
chemical_text_lower = chemical_text.lower()
if chemical_text_lower in ['statin','statins']:
chemical_pharmgkb_id = 'PA133950441'
chemical_normalized = 'HMG-CoA reductase inhibitors'
chemical_drugbank_id = ''
elif chemical_text_lower == 'citalopram':
chemical_pharmgkb_id = 'PA449015'
chemical_normalized = 'Citalopram'
chemical_drugbank_id = 'DB00215'
elif chemical_text_lower == 'levomilnacipran':
chemical_pharmgkb_id = 'PA166182150'
chemical_normalized = 'Levomilnacipran'
chemical_drugbank_id = 'DB08918'
variant_text = variant.text
variant_normalized = utils.normalizeMutation(variant_text)
if variant_normalized is None:
continue
variant_metadata = variant.metadata['conceptid'].split(';')
corresponding_rsids = [ x for x in variant_metadata if re.match(r'rs\d+',x) ]
corresponding_genes = [ x for x in variant_metadata if re.match(r'CorrespondingGene:(?P<id>\d+)',x) ]
variant_id = ''
genes,gene_names,gene_ids = [],'',''
if len(corresponding_rsids) == 1:
variant_id = corresponding_rsids[0]
if variant_id in dbsnp:
gene_names,gene_ids = dbsnp[variant_id]
proteinCoding = [ (gene_id,gene_name) for gene_id,gene_name in zip(gene_ids,gene_names) if gene_id in proteinCodingGenes ]
if len(proteinCoding) > 0:
# Only include the protein coding if there are any
gene_ids = [ gene_id for gene_id,gene_name in proteinCoding ]
gene_names = [ gene_name for gene_id,gene_name in proteinCoding ]
genes = [ e for e in doc.entities if e.entityType == 'Gene' and e.metadata['conceptid'] in gene_ids ]
gene_names = ",".join(gene_names)
gene_ids = ",".join(gene_ids)
if len(corresponding_genes) == 1:
tmp_gene_id = corresponding_genes[0].split(':')[1]
if tmp_gene_id in geneID2Name:
gene_names = geneID2Name[tmp_gene_id]
gene_ids = tmp_gene_id
if variant_id in variantFixes:
variant_id = variantFixes[variant_id]
chemical_position = ";".join( "%s,%s" % c.position[0] for c in chemicals )
variant_position = ";".join( "%s,%s" % v.position[0] for v in variants )
if variant_text.startswith('rs') and variant_text == variant_id:
variant_normalized = variant_id
# Skip variants that start with asterisks but don't have metadata for a star allele - likely a mistake
if variant_text.strip().startswith('*') and not 'associated_gene' in variant.metadata:
continue
variant_type = 'unclear'
if variant_normalized.startswith('*'):
variant_type = 'star_allele'
elif variant_normalized.startswith('p.'):
variant_type = 'protein'
elif variant_normalized.startswith('c.') or variant_normalized.startswith('g.') or variant_normalized.startswith('m.'):
variant_type = 'dna'
elif variant_normalized.startswith('rs'):
variant_type = 'rs_id'
if variant_type == 'star_allele':
variant_normalized = variant.metadata['conceptid']
associated_gene = variant.metadata['associated_gene']
gene_ids,gene_names = None,None
gene_ids = [ gene_id for gene_id in associated_gene.split(';') if gene_id in geneID2Name ]
gene_names = [ geneID2Name[gene_id] for gene_id in gene_ids ]
if len(gene_ids) != 1:
continue
gene_ids = gene_ids[0]
gene_names = gene_names[0]
genes = [ e for e in doc.entities if e.entityType == 'Gene' and e.metadata['conceptid'] == gene_ids ]
isHLAGene = gene_ids in hlaGeneIDs
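# for HLA genes the allele is rewritten into standard nomenclature: e.g. '*5701' -> strip '*' and leading zeros -> '5701' -> '57:01' -> '*57:01', so variant_id becomes e.g. 'HLA-B*57:01'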
if isHLAGene:
variant_normalized = variant_normalized[1:].lstrip('0').replace(':','')
if len(variant_normalized) % 2 == 1: # Pad the variant name with a zero to make it an even length
variant_normalized = "0" + variant_normalized
variant_normalized = re.sub("(\d)(?=(\d{2})+(?!\d))", r"\1:", variant_normalized) # Put in ':' every two digits
variant_normalized = '*' + variant_normalized
variant_id = gene_names + variant_normalized
# Skip cancer drugs that are associated with a DNA/protein variant as likely somatic
if chemical_mesh_id in cancerDrugMeshIDs and variant_type in ['dna','protein']:
continue
# Remove some very frequent mismatches
if (chemical_normalized,variant_id) in obviousMistakes:
continue
sentence = doc.text.replace('’',"'")
formatted_sentence = utils.getFormattedDoc(doc, chemicals + variants + genes)
outData = [ pmid, title, journal, journal_short, year, month, day, is_pediatric_paper, is_adult_paper, age_mesh_tags, section, subsection, chemical_mesh_id, chemical_pharmgkb_id, chemical_drugbank_id, chemical_text, chemical_normalized, chemical_position, variant_id, variant_type, variant_text, variant_normalized, variant_position, gene_ids, gene_names, score, sentence, formatted_sentence ]
allowedUnicode = {'title','journal','journal_short','chemical_text','variant_text','sentence','formatted_sentence'}
assert len(outData) == len(headers)
for h,v in zip(headers,outData):
if not (h in allowedUnicode or isASCII(str(v))):
print('WARNING: Found non-ASCII "%s" in column "%s" (PMID=%s)' % (str(v),h,pmid))
outF.write("\t".join(map(str,outData)) + "\n")
predictedCount += 1
print("Predicted %d association(s) for %s variants" % (predictedCount, mode))
| return len(s) == len(s.encode()) | identifier_body |
createKB.py | import kindred
import argparse
from collections import defaultdict
import sys
import json
import re
import utils
import gzip
def isASCII(s):
#try:
# s.decode('ascii')
# return True
#except UnicodeDecodeError:
# return False
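# an ASCII-only string encodes to exactly one byte per character, so the character count equals the UTF-8 byte count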
return len(s) == len(s.encode())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build classifiers and create the PGmine knowledge base')
parser.add_argument('--trainingFiles',required=True,type=str,help='2 BioC files (comma-separated) for star_rs and other')
parser.add_argument('--selectedChemicals',required=True,type=str,help='Which chemicals to filter for')
parser.add_argument('--dbsnp',required=True,type=str,help='File containing mappings from dbSNP IDs to genes')
parser.add_argument('--genes',required=True,type=str,help='File containing gene data')
parser.add_argument('--variantStopwords',required=True,type=str,help='Variants to remove')
parser.add_argument('--relevantMeSH',required=True,type=str,help='File with MeSH term mappings')
parser.add_argument('--inBioC',required=True,type=str,help='BioC file to predict things on')
parser.add_argument('--outKB',required=True,type=str,help='TSV file for KB')
args = parser.parse_args()
chemMeshIDs = set()
meshIDsToChemName,meshIDsToPharmGKB,meshIDsToDrugBank = {},{},{}
cancerDrugMeshIDs = set()
with open(args.selectedChemicals) as f:
chemData = json.load(f)
for chem in chemData:
for meshID in chem['MeSH']:
meshID = 'MESH:'+meshID
chemMeshIDs.add(meshID)
if chem['isCancerDrug']:
cancerDrugMeshIDs.add(meshID)
meshIDsToChemName[meshID] = chem['name']
meshIDsToDrugBank[meshID] = chem['DrugBank']
if 'PharmGKB' in chem:
meshIDsToPharmGKB[meshID] = chem['PharmGKB']
dbsnp = {}
with open(args.dbsnp) as f:
for line in f:
rsid,geneInfos = line.strip('\n').split('\t')
geneInfos = [ tuple(geneInfo.split(':')) for geneInfo in geneInfos.split('|') ]
geneNames = [ geneName for geneName,geneID in geneInfos ]
geneIDs = [ geneID for geneName,geneID in geneInfos ]
dbsnp[rsid] = (geneNames,geneIDs)
with open(args.variantStopwords) as f:
variantStopwords = set( line.strip().lower() for line in f )
geneID2Name = {}
proteinCodingGenes = set()
with open(args.genes) as f:
for line in f:
entrezID,name,geneType = line.strip('\n').split('\t')
geneID2Name[entrezID] = name
if geneType == 'protein-coding':
proteinCodingGenes.add(entrezID)
print("Loaded chemical, gene and variant data")
pediatricTerms = set(['Pediatrics','Infant','Infant, Newborn','Child','Child, Preschool','Adolescent','Birth Cohort'])
adultTerms = set(['Adult','Aged','Middle Aged','Young Adult'])
with gzip.open(args.relevantMeSH,'rt') as f:
relevantMeSH = json.load(f)
pediatricPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in pediatricTerms for t in terms ) )
adultPMIDs = set( int(pmid) for pmid,terms in relevantMeSH.items() if any( t in adultTerms for t in terms ) )
pmidToRelevantMeSH = { int(pmid):[t for t in terms if t in pediatricTerms or t in adultTerms] for pmid,terms in relevantMeSH.items() }
print("Loaded mesh PMIDs for pediatric/adult terms")
# Fix mapping of some popular variants to the correct SNP
variantFixes = {
'rs121913377':'rs113488022' # BRAF V600E
}
modes = ["star_rs","other"]
trainingFiles = args.trainingFiles.split(',')
assert len(trainingFiles) == 2, "Must provide 2 files (comma-separated) for star_rs and other"
hlaGeneIDs = {"3105","3106","3107","3108","3109","3111","3112","3113","3115","3117","3118","3119","3120","3121","3122","3123","3125","3126","3127","3133","3134","3135"}
obviousMistakes = {('Abacavir','HLA-B*15:02'),('Allopurinol','HLA-B*15:02'),('Carbamazepine','HLA-B*57:01'),('Allopurinol','HLA-B*57:01'),('Carbamazepine','HLA-B*58:01'),('Abacavir','HLA-B*58:01')}
chemicalExclusions = {'cc and tc', 'cc+tc', 'cc + tc','whitehall ii','rvoto','lev-pae','oxaipn','luminal b','oxc-mpe','rapid stemi','vp40e'}
headers = ['pmid','title','journal','journal_short','year','month','day','is_pediatric_paper','is_adult_paper','age_mesh_tags','section','subsection','chemical_mesh_id','chemical_pharmgkb_id','chemical_drugbank_id','chemical_text','chemical_normalized','chemical_position','variant_id','variant_type','variant_text','variant_normalized','variant_position','gene_ids','gene_names','score','sentence','formatted_sentence']
with open(args.outKB,'w') as outF:
outF.write("\t".join(headers) + "\n")
for mode,trainingData in zip(modes,trainingFiles):
print("Creating classifier for %s" % mode)
predictedCount = 0
trainCorpus = kindred.load('biocxml',trainingData)
corpus = kindred.load('biocxml',args.inBioC)
for doc in trainCorpus.documents:
for relation in doc.relations:
relation.relationType = 'ChemicalMutation'
for doc in corpus.documents:
if mode == 'star_allele':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('*')) ]
elif mode == 'rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not e.text.strip().startswith('rs')) ]
elif mode == 'star_rs':
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and not (e.text.strip().startswith('rs') or e.text.strip().startswith('*'))) ]
else:
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and (e.text.strip().startswith('*') or e.text.strip().startswith('rs'))) ]
doc.entities = [ e for e in doc.entities if e.position[0][0] >= 0 ]
doc.entities = [ e for e in doc.entities if e.position[0][1] <= len(doc.text) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and not e.metadata['conceptid'] in chemMeshIDs) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and len(e.text) <= 4) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and utils.normalizeMutation(e.text) is None) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Mutation' and e.text.lower() in variantStopwords) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and re.match("^[A-Z][\[\]\-\(\)\d]*[A-Z]$",e.text) ) ]
doc.entities = [ e for e in doc.entities if not (e.entityType == 'Chemical' and e.text.lower() in chemicalExclusions) ]
parser = kindred.Parser(model='en_core_sci_sm')
parser.parse(trainCorpus)
parser.parse(corpus)
chemicalVariantClassifier = kindred.RelationClassifier(classifierType='LogisticRegression',threshold=0.5,acceptedEntityTypes=[('Chemical','Mutation')])
chemicalVariantClassifier.train(trainCorpus)
chemicalVariantClassifier.predict(corpus)
for doc in corpus.documents:
pmid = doc.metadata['pmid']
title = doc.metadata['title']
journal = doc.metadata['journal']
year = doc.metadata['year']
month = doc.metadata['month']
day = doc.metadata['day']
section = doc.metadata['section']
is_pediatric_paper = pmid and int(pmid) in pediatricPMIDs
is_adult_paper = pmid and int(pmid) in adultPMIDs
age_mesh_tags = "|".join(pmidToRelevantMeSH[int(pmid)]) if int(pmid) in pmidToRelevantMeSH else ""
journal_short = journal
if len(journal_short) > 50:
journal_short = journal_short[:50] + '...'
if 'subsection' in doc.metadata:
subsection = doc.metadata['subsection'].replace('’',"'")
elif doc.metadata['section'] == 'abstract':
subsection = 'abstract'
elif doc.metadata['section'] == 'title':
subsection = 'title'
if subsection == 'None':
subsection = 'unknown'
groups = defaultdict(set)
scores = defaultdict(lambda : 0)
# Group chemical/variant pairs so that we don't create redundant relations describing the same association when the same chemical or variant is mentioned multiple times in the sentence
chemicalVariantRelations = [ r for r in doc.relations if r.relationType == 'ChemicalMutation' ]
for chemicalVariant in chemicalVariantRelations:
chemical,variant = chemicalVariant.entities
chemical_mesh_id = chemical.metadata['conceptid']
variant_concept_id = variant.metadata['conceptid']
if ';' in chemical_mesh_id:
continue
elif ';' in variant_concept_id:
continue
key = (chemical_mesh_id,variant_concept_id)
groups[key].add(chemical)
groups[key].add(variant)
scores[key] = max(scores[key],chemicalVariant.probability)
for key,chemicalVariants in groups.items():
score = scores[key]
# Sort by location in sentence
chemicalVariants = sorted(chemicalVariants, key = lambda x: x.position[0] )
chemicals = [ e for e in chemicalVariants if e.entityType == 'Chemical' ]
variants = [ e for e in chemicalVariants if e.entityType == 'Mutation' ]
chemical,variant = chemicals[0],variants[0]
chemical_text = chemical.text
chemical_mesh_id = chemical.metadata['conceptid']
chemical_pharmgkb_id = meshIDsToPharmGKB[chemical_mesh_id] if chemical_mesh_id in meshIDsToPharmGKB else 'NA'
chemical_normalized = meshIDsToChemName[chemical_mesh_id]
chemical_drugbank_id = meshIDsToDrugBank[chemical_mesh_id]
# Remap statins
chemical_text_lower = chemical_text.lower()
if chemical_text_lower in ['statin','statins']:
chemical_pharmgkb_id = 'PA133950441'
chemical_normalized = 'HMG-CoA reductase inhibitors'
chemical_drugbank_id = ''
elif chemical_text_lower == 'citalopram':
chemical_pharmgkb_id = 'PA449015'
chemical_normalized = 'Citalopram'
chemical_drugbank_id = 'DB00215'
elif chemical_text_lower == 'levomilnacipran':
chemical_pharmgkb_id = 'PA166182150'
chemical_normalized = 'Levomilnacipran'
chemical_drugbank_id = 'DB08918'
variant_text = variant.text
variant_normalized = utils.normalizeMutation(variant_text)
if variant_normalized is None:
continue
variant_metadata = variant.metadata['conceptid'].split(';')
corresponding_rsids = [ x for x in variant_metadata if re.match(r'rs\d+',x) ]
corresponding_genes = [ x for x in variant_metadata if re.match(r'CorrespondingGene:(?P<id>\d+)',x) ]
variant_id = ''
genes,gene_names,gene_ids = [],'',''
if len(corresponding_rsids) == 1:
variant_id = corresponding_rsids[0]
if variant_id in dbsnp:
gene_names,gene_ids = dbsnp[variant_id]
proteinCoding = [ (gene_id,gene_name) for gene_id,gene_name in zip(gene_ids,gene_names) if gene_id in proteinCodingGenes ]
if len(proteinCoding) > 0:
# Only keep the protein-coding genes if any are present
gene_ids = [ gene_id for gene_id,gene_name in proteinCoding ]
gene_names = [ gene_name for gene_id,gene_name in proteinCoding ]
genes = [ e for e in doc.entities if e.entityType == 'Gene' and e.metadata['conceptid'] in gene_ids ]
gene_names = ",".join(gene_names)
gene_ids = ",".join(gene_ids)
if len(corresponding_genes) == 1:
tmp_gene_id = corresponding_genes[0].split(':')[1]
if tmp_gene_id in geneID2Name:
gene_names = geneID2Name[tmp_gene_id]
gene_ids = tmp_gene_id
if variant_id in variantFixes:
variant_id = variantFixes[variant_id]
chemical_position = ";".join( "%s,%s" % c.position[0] for c in chemicals )
variant_position = ";".join( "%s,%s" % v.position[0] for v in variants )
if variant_text.startswith('rs') and variant_text == variant_id:
variant_normalized = variant_id
# Skip variants that start with asterisks but don't have metadata for a star allele - likely a mistake
if variant_text.strip().startswith('*') and not 'associated_gene' in variant.metadata:
continue
variant_type = 'unclear'
if variant_normalized.startswith('*'):
variant_type = 'star_allele'
elif variant_normalized.startswith('p.'):
variant_type = 'protein'
elif variant_normalized.startswith('c.') or variant_normalized.startswith('g.') or variant_normalized.startswith('m.'):
variant_type = 'dna'
elif variant_normalized.startswith('rs'):
variant_type = 'rs_id'
if variant_type == 'star_allele':
variant_normalized = variant.metadata['conceptid']
associated_gene = variant.metadata['associated_gene']
gene_ids,gene_names = None,None
gene_ids = [ gene_id for gene_id in associated_gene.split(';') if gene_id in geneID2Name ]
gene_names = [ geneID2Name[gene_id] for gene_id in gene_ids ]
if len(gene_ids) != 1:
continue
gene_ids = gene_ids[0]
gene_names = gene_names[0]
genes = [ e for e in doc.entities if e.entityType == 'Gene' and e.metadata['conceptid'] == gene_ids ]
isHLAGene = gene_ids in hlaGeneIDs
if isHLAGene:
variant_normalized = variant_normalized[1:].lstrip('0').replace(':','')
if len(variant_normalized) % 2 == 1: # Pad the variant name with a zero to make it an even length
variant_normalized = "0" + variant_normalized
variant_normalized = re.sub("(\d)(?=(\d{2})+(?!\d))", r"\1:", variant_normalized) # Put in ':' every two digits
variant_normalized = '*' + variant_normalized
variant_id = gene_names + variant_normalized
# Skip cancer drugs that are associated with a DNA/protein variant as likely somatic
if chemical_mesh_id in cancerDrugMeshIDs and variant_type in ['dna','protein']:
continue
# Remove some very frequent mismatches
if (chemical_normalized,variant_id) in obviousMistakes:
continue
sentence = doc.text.replace('’',"'")
formatted_sentence = utils.getFormattedDoc(doc, chemicals + variants + genes)
outData = [ pmid, title, journal, journal_short, year, month, day, is_pediatric_paper, is_adult_paper, age_mesh_tags, section, subsection, chemical_mesh_id, chemical_pharmgkb_id, chemical_drugbank_id, chemical_text, chemical_normalized, chemical_position, variant_id, variant_type, variant_text, variant_normalized, variant_position, gene_ids, gene_names, score, sentence, formatted_sentence ]
allowedUnicode = {'title','journal','journal_short','chemical_text','variant_text','sentence','formatted_sentence'}
assert len(outData) == len(headers)
for h,v in zip(headers,outData):
if not (h in allowedUnicode or isASCII(str(v))):
print('WARNING: Found non-ASCII "%s" in column "%s" (PMID=%s)' % (str(v),h,pmid))
outF.write("\t".join(map(str,outData)) + "\n")
predictedCount += 1
print("Predicted %d association(s) for %s variants" % (predictedCount, mode)) | random_line_split |
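# Illustrative sketch (not part of the original script): createKB.py collapses
# repeated mentions of the same chemical/variant pair within a sentence into a
# single group and keeps the highest classifier probability for that pair (the
# script also keeps the entity mentions so their positions can be reported).
# The same bookkeeping with plain tuples and hypothetical identifiers:
from collections import defaultdict

relations = [
    ('MESH:D001241', 'rs113488022', 0.62),
    ('MESH:D001241', 'rs113488022', 0.91),  # same pair mentioned twice
    ('MESH:D002220', 'rs4244285', 0.55),
]

best_score = defaultdict(float)
for chem_id, var_id, prob in relations:
    key = (chem_id, var_id)
    best_score[key] = max(best_score[key], prob)

for pair, score in best_score.items():
    print(pair, score)  # each pair is reported once, with its best probability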
# weather_stn_data.py - Used by extract_numbers.py, get_info_from_mshr.py, get_past_observations_cdo.py, get_daily_normals_cdo.py
# Location of each of the following numbers in the image corresponds to a city (or weather station) in the continental U.S.
# Location of (lower-left pixel of) first character of number in image when the number is 2 characters and positive, e.g. 12 or 63, not 1, 101, -1, -23
# Number in image is "weather number", e.g. max temperature prediction.
# 0-based row=y, column=x (origin=lower-left); character is 13 rows x 9 columns pixel array
# Data Science class project - General Assembly - Seattle - SEA-DAT1 (10/27/15 - 1/21/16)
# Developed by Bruce Aker 1/11/16 - 1/21/16
[{'icao_code':'KABE','row':494,'col':739,'stn_id_cdo':'GHCND:USW00014737','state':'PA','weather_station':'ALLENTOWN INTL AP'},
{'icao_code':'KABQ','row':275,'col':299,'stn_id_cdo':'GHCND:USW00023050','state':'NM','weather_station':'ALBUQUERQUE INTL AP'},
{'icao_code':'KACV','row':437,'col': 54,'stn_id_cdo':'GHCND:USW00024283','state':'CA','weather_station':'ARCATA EUREKA AP'},
{'icao_code':'KACY','row':480,'col':762,'stn_id_cdo':'GHCND:USW00093730','state':'NJ','weather_station':'ATLANTIC CITY INTL AP'},
{'icao_code':'KALB','row':540,'col':739,'stn_id_cdo':'GHCND:USW00014735','state':'NY','weather_station':'ALBANY AP'},
{'icao_code':'KANJ','row':541,'col':582,'stn_id_cdo':'GHCND:USW00014847','state':'MI','weather_station':'SAULT STE MARIE SANDERSON FLD'},
{'icao_code':'KATL','row':308,'col':669,'stn_id_cdo':'GHCND:USW00013874','state':'GA','weather_station':'ATLANTA HARTSFIELD INTL AP'},
{'icao_code':'KBCE','row':335,'col':215,'stn_id_cdo':'GHCND:USW00023159','state':'UT','weather_station':'BRYCE CANYON AP'},
{'icao_code':'KBGR','row':610,'col':774,'stn_id_cdo':'GHCND:USW00014606','state':'ME','weather_station':'BANGOR INTL AP'},
{'icao_code':'KBHM','row':294,'col':632,'stn_id_cdo':'GHCND:USW00013876','state':'AL','weather_station':'BIRMINGHAM AP'},
{'icao_code':'KBIL','row':484,'col':281,'stn_id_cdo':'GHCND:USW00024033','state':'MT','weather_station':'BILLINGS INTL AP'}, #wfo=byz
{'icao_code':'KBIS','row':502,'col':380,'stn_id_cdo':'GHCND:USW00024011','state':'ND','weather_station':'BISMARCK MUNI AP'},
{'icao_code':'KBNA','row':343,'col':617,'stn_id_cdo':'GHCND:USW00013897','state':'TN','weather_station':'NASHVILLE INTL AP'},
{'icao_code':'KBNO','row':464,'col':138,'stn_id_cdo':'GHCND:USW00094185','state':'OR','weather_station':'BURNS MUNI AP'},
{'icao_code':'KBOI','row':456,'col':174,'stn_id_cdo':'GHCND:USW00024131','state':'ID','weather_station':'BOISE AIR TERMINAL'}, #wfo=boi
{'icao_code':'KBOS','row':555,'col':775,'stn_id_cdo':'GHCND:USW00014739','state':'MA','weather_station':'BOSTON LOGAN INTL AP'},
{'icao_code':'KBPI','row':425,'col':255,'stn_id_cdo':'GHCND:USW00024164','state':'WY','weather_station':'BIG PINEY MARBLETON AP'},
{'icao_code':'KBTV','row':573,'col':729,'stn_id_cdo':'GHCND:USW00014742','state':'VT','weather_station':'BURLINGTON INTL AP'},
{'icao_code':'KBUF','row':510,'col':677,'stn_id_cdo':'GHCND:USW00014733','state':'NY','weather_station':'BUFFALO NIAGARA INTL AP'},
{'icao_code':'KCAE','row':335,'col':719,'stn_id_cdo':'GHCND:USW00013883','state':'SC','weather_station':'COLUMBIA METRO AP'},
{'icao_code':'KCLE','row':466,'col':649,'stn_id_cdo':'GHCND:USW00014820','state':'OH','weather_station':'CLEVELAND HOPKINS INTL AP'},
{'icao_code':'KCMH','row':435,'col':646,'stn_id_cdo':'GHCND:USW00014821','state':'OH','weather_station':'COLUMBUS PORT COLUMBUS INTL AP'},
{'icao_code':'KCOS','row':350,'col':330,'stn_id_cdo':'GHCND:USW00093037','state':'CO','weather_station':'COLORADO SPRINGS MUNI AP'}, #wfo=pub
{'icao_code':'KCOU','row':356,'col':517,'stn_id_cdo':'GHCND:USW00003945','state':'MO','weather_station':'COLUMBIA RGNL AP'},
{'icao_code':'KCPR','row':429,'col':306,'stn_id_cdo':'GHCND:USW00024089','state':'WY','weather_station':'Casper, Natrona County International Airport'},
{'icao_code':'KCRE','row':350,'col':756,'stn_id_cdo':'GHCND:USW00093718','state':'SC','weather_station':'N MYRTLE BCH AP'},
{'icao_code':'KCRP','row':132,'col':470,'stn_id_cdo':'GHCND:USW00012924','state':'TX','weather_station':'CORPUS CHRISTI INTL AP'},
{'icao_code':'KCRW','row':413,'col':676,'stn_id_cdo':'GHCND:USW00013866','state':'WV','weather_station':'CHARLESTON YEAGER AP'},
{'icao_code':'KDCA','row':452,'col':735,'stn_id_cdo':'GHCND:USW00013743','state':'VA','weather_station':'WASHINGTON REAGAN AP'},
{'icao_code':'KDFW','row':240,'col':463,'stn_id_cdo':'GHCND:USW00003927','state':'TX','weather_station':'Dallas-Fort Worth WSCMO AP'},
{'icao_code':'KDSM','row':418,'col':487,'stn_id_cdo':'GHCND:USW00014933','state':'IA','weather_station':'DES MOINES INTL AP'},
{'icao_code':'KEYW','row':146,'col':788,'stn_id_cdo':'GHCND:USW00012836','state':'FL','weather_station':'KEY WEST INTL AP'},
{'icao_code':'KFAR','row':510,'col':429,'stn_id_cdo':'GHCND:USW00014914','state':'ND','weather_station':'Fargo, Hector International Airport'}, #WFO=FGF, Grand Forks, ND
{'icao_code':'KFAT','row':339,'col': 94,'stn_id_cdo':'GHCND:USW00093193','state':'CA','weather_station':'Fresno Air Terminal'}, #wfo=hnx
{'icao_code':'KFLG','row':283,'col':216,'stn_id_cdo':'GHCND:USW00003103','state':'AZ','weather_station':'FLAGSTAFF PULLIAM AP'},
{'icao_code':'KFMY','row':187,'col':768,'stn_id_cdo':'GHCND:USW00012835','state':'FL','weather_station':'FT MYERS PAGE FLD AP'},
{'icao_code':'KFSD','row':449,'col':438,'stn_id_cdo':'GHCND:USW00014944','state':'SD','weather_station':'SIOUX FALLS FOSS FLD'},
{'icao_code':'KFST','row':190,'col':364,'stn_id_cdo':'GHCND:USW00023091','state':'TX','weather_station':'FT STOCKTON PECOS AP'},
{'icao_code':'KGEG','row':533,'col':173,'stn_id_cdo':'GHCND:USW00024157','state':'WA','weather_station':'SPOKANE INTL AP'}, #wfo=otx
{'icao_code':'KGGW','row':527,'col':306,'stn_id_cdo':'GHCND:USW00094008','state':'MT','weather_station':'GLASGOW INTL AP'},
{'icao_code':'KGRB','row':491,'col':548,'stn_id_cdo':'GHCND:USW00014898','state':'WI','weather_station':'GREEN BAY A S INTL AP'},
{'icao_code':'KGRR','row':473,'col':591,'stn_id_cdo':'GHCND:USW00094860','state':'MI','weather_station':'GRAND RAPIDS INTL AP'},
{'icao_code':'KGSO','row':383,'col':718,'stn_id_cdo':'GHCND:USW00013723','state':'NC','weather_station':'PIEDMONT TRIAD INTL AP'},
{'icao_code':'KHLN','row':502,'col':238,'stn_id_cdo':'GHCND:USW00024144','state':'MT','weather_station':'HELENA RGNL AP'}, #wfo=tfx
{'icao_code':'KIAH','row':185,'col':502,'stn_id_cdo':'GHCND:USW00012960','state':'TX','weather_station':'HOUSTON INTERCONT AP'},
{'icao_code':'KICT','row':334,'col':444,'stn_id_cdo':'GHCND:USW00003928','state':'KS','weather_station':'WICHITA DWIGHT D EISENHOWER NA'},
{'icao_code':'KINL','row':547,'col':465,'stn_id_cdo':'GHCND:USW00014918','state':'MN','weather_station':'International Falls Airport'},
{'icao_code':'KJAN','row':253,'col':584,'stn_id_cdo':'GHCND:USW00003940','state':'MS','weather_station':'JACKSON INTL AP'},
{'icao_code':'KJAX','row':266,'col':738,'stn_id_cdo':'GHCND:USW00013889','state':'FL','weather_station':'JACKSONVILLE INTL AP'},
{'icao_code':'KLAS','row':310,'col':163,'stn_id_cdo':'GHCND:USW00023169','state':'NV','weather_station':'LAS VEGAS MCCARRAN AP'},
{'icao_code':'KLAX','row':278,'col':101,'stn_id_cdo':'GHCND:USW00023174','state':'CA','weather_station':'Los Angeles International Airport'}, #wfo=lox
{'icao_code':'KLBB','row':248,'col':380,'stn_id_cdo':'GHCND:USW00023042','state':'TX','weather_station':'LUBBOCK INTL AP'},
{'icao_code':'KLBF','row':397,'col':388,'stn_id_cdo':'GHCND:USW00024023','state':'NE','weather_station':'N PLATTE RGNL AP'},
{'icao_code':'KLEX','row':390,'col':636,'stn_id_cdo':'GHCND:USW00093820','state':'KY','weather_station':'LEXINGTON BLUEGRASS AP'},
{'icao_code':'KLIT','row':292,'col':537,'stn_id_cdo':'GHCND:USW00013963','state':'AR','weather_station':'LITTLE ROCK AP ADAMS FLD'},
{'icao_code':'KMCI','row':373,'col':480,'stn_id_cdo':'GHCND:USW00003947','state':'MO','weather_station':'KANSAS CITY INTL AP'},
{'icao_code':'KMCO','row':228,'col':762,'stn_id_cdo':'GHCND:USW00012815','state':'FL','weather_station':'ORLANDO INTL AP'},
{'icao_code':'KMEM','row':307,'col':571,'stn_id_cdo':'GHCND:USW00013893','state':'TN','weather_station':'MEMPHIS INTL AP'},
{'icao_code':'KMIA','row':184,'col':804,'stn_id_cdo':'GHCND:USW00012839','state':'FL','weather_station':'MIAMI INTL AP'},
{'icao_code':'KMIE','row':426,'col':610,'stn_id_cdo':'GHCND:USW00094895','state':'IN','weather_station':'MUNCIE DELAWARE CO AP'},
{'icao_code':'KMLI','row':427,'col':531,'stn_id_cdo':'GHCND:USW00014923','state':'IL','weather_station':'MOLINE QUAD CITY INTL AP'},
{'icao_code':'KMOB','row':229,'col':625,'stn_id_cdo':'GHCND:USW00013894','state':'AL','weather_station':'MOBILE RGNL AP'},
{'icao_code':'KMSY','row':205,'col':594,'stn_id_cdo':'GHCND:USW00012916','state':'LA','weather_station':'NEW ORLEANS INTL AP'},
{'icao_code':'KOKC','row':290,'col':447,'stn_id_cdo':'GHCND:USW00013967','state':'OK','weather_station':'OKLAHOMA CITY WILL ROGERS AP'},
{'icao_code':'KORF','row':424,'col':765,'stn_id_cdo':'GHCND:USW00013737','state':'VA','weather_station':'NORFOLK INTL AP'},
{'icao_code':'KP60','row':462,'col':254,'stn_id_cdo':'GHCND:USW00094173','state':'WY','weather_station':'Yellowstone Lake'},
{'icao_code':'KPDX','row':513,'col':101,'stn_id_cdo':'GHCND:USW00024229','state':'OR','weather_station':'Portland International Airport'},
{'icao_code':'KPHX','row':249,'col':206,'stn_id_cdo':'GHCND:USW00023183','state':'AZ','weather_station':'Phoenix Sky Harbor International Airport'}, #wfo=psr
{'icao_code':'KPOU','row':521,'col':749,'stn_id_cdo':'GHCND:USW00014757','state':'NY','weather_station':'POUGHKEEPSIE DUTCHESS CO AP'},
{'icao_code':'KPWM','row':581,'col':770,'stn_id_cdo':'GHCND:USW00014764','state':'ME','weather_station':'PORTLAND INTL JETPORT'},
{'icao_code':'KRAP','row':450,'col':352,'stn_id_cdo':'GHCND:USW00024090','state':'SD','weather_station':'RAPID CITY REGIONAL AP'},
{'icao_code':'KRDD','row':420,'col': 76,'stn_id_cdo':'GHCND:USW00024257','state':'CA','weather_station':'REDDING MUNI AP'}, #wfo=sto
{'icao_code':'KRNO','row':391,'col':107,'stn_id_cdo':'GHCND:USW00023185','state':'NV','weather_station':'RENO TAHOE INTL AP'}, #wfo=rev
{'icao_code':'KROA','row':405,'col':708,'stn_id_cdo':'GHCND:USW00013741','state':'VA','weather_station':'ROANOKE RGNL AP'},
{'icao_code':'KRST','row':466,'col':494,'stn_id_cdo':'GHCND:USW00014925','state':'MN','weather_station':'ROCHESTER INTL AP'},
{'icao_code':'KSAN','row':250,'col':116,'stn_id_cdo':'GHCND:USW00023188','state':'CA','weather_station':'SAN DIEGO LINDBERGH FLD'}, #wfo=sgx
{'icao_code':'KSAT','row':167,'col':447,'stn_id_cdo':'GHCND:USW00012921','state':'TX','weather_station':'SAN ANTONIO INTL AP'},
{'icao_code':'KSAV','row':301,'col':732,'stn_id_cdo':'GHCND:USW00003822','state':'GA','weather_station':'SAVANNAH INTL AP'},
{'icao_code':'KSEA','row':545,'col':115,'stn_id_cdo':'GHCND:USW00024233','state':'WA','weather_station':'Seattle Tacoma Airport'}, #WFO=SEW, Sandpoint, Seattle, WA
{'icao_code':'KSHV','row':241,'col':520,'stn_id_cdo':'GHCND:USW00013957','state':'LA','weather_station':'SHREVEPORT RGNL AP'},
{'icao_code':'KSLC','row':394,'col':225,'stn_id_cdo':'GHCND:USW00024127','state':'UT','weather_station':'SALT LAKE CITY INTL AP'}, #wfo=slc
{'icao_code':'KSPI','row':400,'col':551,'stn_id_cdo':'GHCND:USW00093822','state':'IL','weather_station':'SPRINGFIELD CAPITAL AP'},
{'icao_code':'KTAD','row':319,'col':337,'stn_id_cdo':'GHCND:USW00023070','state':'CO','weather_station':'TRINIDAD PERRY STOKES AP'},
{'icao_code':'KTPA','row':210,'col':745,'stn_id_cdo':'GHCND:USW00012842','state':'FL','weather_station':'TAMPA INTL AP'},
{'icao_code':'KTUS','row':220,'col':222,'stn_id_cdo':'GHCND:USW00023160','state':'AZ','weather_station':'TUCSON INTL AP'},
{'icao_code':'KUIL','row':562,'col': 92,'stn_id_cdo':'GHCND:USW00094240','state':'WA','weather_station':'Quillayute State Airport'}, #WFO=SEW
{'icao_code':'KWMC','row':411,'col':142,'stn_id_cdo':'GHCND:USW00024128','state':'NV','weather_station':'WINNEMUCCA MUNI AP'}, #wfo=lkn
{'icao_code':'KYKM','row':523,'col':132,'stn_id_cdo':'GHCND:USW00024243','state':'WA','weather_station':'Yakima Air Terminal'}] #WFO=PDT, Pendleton, OR
# {'icao_code':'K','row':000,'col':000,'stn_id_cdo':'GHCND:USW000','state':'','weather_station':''},
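# Illustrative sketch (not part of the original scripts): the body of this file
# is one bare list literal, so a consumer can parse it and index the stations
# however it needs. How extract_numbers.py actually loads it is not shown here;
# one way, assuming the file is read as plain text, is:
import ast

def load_stations(path='weather_stn_data.py'):
    with open(path) as f:
        # drop full-line comments; inline comments are tolerated by the parser
        text = ''.join(line for line in f if not line.lstrip().startswith('#'))
    return ast.literal_eval(text)

by_icao = {s['icao_code']: s for s in load_stations()}
sea = by_icao['KSEA']
# (row, col) is the lower-left pixel of the first character of the 2-digit
# number; each character covers 13 rows x 9 columns of pixels.
print(sea['weather_station'], sea['row'], sea['col'], sea['stn_id_cdo'])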
"""
Script: coverage.py
Identifies domains that only occur in multi-domain proteins. The main
script is master.
--------------------
Felix A Kruger
[email protected]
"""
####
#### import modules.
####
import queryDevice
import operator
import yaml
import time
####
#### Load parameters.
####
paramFile = open('local.yaml')
params = yaml.safe_load(paramFile)
paramFile.close()
#### Define functions.
#-----------------------------------------------------------------------------------------------------------------------
def get_el_targets(params):
"""Query the ChEMBL database for (almost) all activities that are subject to the mapping. Does not conver activities expressed in log-conversion eg pIC50 etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
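# Illustrative only: each element returned by get_el_targets is a
# (tid, target_type, domain_count, assay_count, act_count) tuple; a hypothetical
# row such as (11060, 'SINGLE PROTEIN', 2, 57, 412) would describe a two-domain
# single-protein target with 57 distinct assays and 412 activities below the
# potency threshold.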
def readfile(path, key_name, val_name):
    """Read two columns from a tab-separated file into a dictionary.
    Inputs:
    path -- filepath
    key_name -- name of the column holding the key
    val_name -- name of the column holding the value
    """
    infile = open(path, 'r')
    lines = infile.readlines()
    infile.close()
    lkp = {}
    els = lines[0].rstrip().split('\t')
    for i, el in enumerate(els):
        if el == key_name:
            key_idx = i
        if el == val_name:
            val_idx = i
    for line in lines[1:]:
        elements = line.rstrip().split('\t')
        lkp[elements[key_idx]] = elements[val_idx]
    return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
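# Illustrative only: get_archs returns three lookups keyed on the comma-joined,
# alphabetically sorted architecture string. With hypothetical Pfam names,
# three targets annotated 'Pkinase, SH2' and one single-domain 'Pkinase' target
# would give roughly:
# arch_lkp = {'Pkinase, SH2': 3, 'Pkinase': 1} (targets per architecture)
# act_lkp = {'Pkinase, SH2': 410, 'Pkinase': 55} (activities per architecture)
# dom_lkp = {'Pkinase': 3, 'SH2': 3} (multi-domain targets containing the domain)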
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
tids -- list of target identifiers (tid)
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(valz)
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
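# Illustrative only: each call to count_valid appends one tab-separated row to
# data/log.tab with the columns
# valid, all, release, threshold, comment, timestamp, valid_archs, all_archs.
# master() calls it once with arch_lkp (target counts per architecture) and once
# with act_lkp (activity counts), so architecture coverage and activity coverage
# are logged as two separate rows.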
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occuring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## Load eligible targets.
el_targets = get_el_targets(params)
## Get domains for tids.
pfam_lkp = get_doms([x[0] for x in el_targets], params)
## Add targets with given architecture.
(arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)
## Count covered acrchitectures.
count_valid(arch_lkp, valid_doms)
## Count covered activities.
count_valid(act_lkp, valid_doms)
## Write multi-domain architechtures to markdown tables.
export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])
## Write domains from multi-domain architechtures to markdown tables.
export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])
## export network file.
export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])
## export network attribute file.
export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
import sys
if len(sys.argv) != 2: # the program name and one argument
sys.exit("""Parameters are read from mpf.yaml but must specify
version for data/valid_pfam_v_%(version)s.tab""")
version = sys.argv[1]
master(version)
| """Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp | identifier_body |
coverage.py | """
Script: coverage.py
Identifies domains that only occur in multi-domain proteins. The main
script is master.
--------------------
Felix A Kruger
[email protected]
"""
####
#### import modules.
####
import queryDevice
import operator
import yaml
import time
####
#### Load parameters.
####
paramFile = open('local.yaml')
params = yaml.safe_load(paramFile)
paramFile.close()
#### Define functions.
#-----------------------------------------------------------------------------------------------------------------------
def get_el_targets(params):
"""Query the ChEMBL database for (almost) all activities that are subject to the mapping. Does not conver activities expressed in log-conversion eg pIC50 etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
"""Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
|
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(sum([x[0] for x in valz]))
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occuring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## Load eligible targets.
el_targets = get_el_targets(params)
## Get domains for tids.
pfam_lkp = get_doms([x[0] for x in el_targets], params)
## Add targets with given architecture.
(arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)
## Count covered acrchitectures.
count_valid(arch_lkp, valid_doms)
## Count covered activities.
count_valid(act_lkp, valid_doms)
## Write multi-domain architechtures to markdown tables.
export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])
## Write domains from multi-domain architechtures to markdown tables.
export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])
## export network file.
export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])
## export network attribute file.
export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
import sys
if len(sys.argv) != 2: # the program name and one argument
sys.exit("""Parameters are read from mpf.yaml but must specify
version for data/valid_pfam_v_%(version)s.tab""")
version = sys.argv[1]
master(version)
| try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1 | conditional_block |
coverage.py | """
Script: coverage.py
Identifies domains that only occur in multi-domain proteins. The main
script is master.
--------------------
Felix A Kruger
[email protected]
"""
####
#### import modules.
####
import queryDevice
import operator
import yaml
import time
####
#### Load parameters.
####
paramFile = open('local.yaml')
params = yaml.safe_load(paramFile)
paramFile.close()
#### Define functions.
#-----------------------------------------------------------------------------------------------------------------------
def get_el_targets(params):
"""Query the ChEMBL database for (almost) all activities that are subject to the mapping. Does not conver activities expressed in log-conversion eg pIC50 etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
"""Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def | (lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(sum([x[0] for x in valz]))
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occuring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## Load eligible targets.
el_targets = get_el_targets(params)
## Get domains for tids.
pfam_lkp = get_doms([x[0] for x in el_targets], params)
## Add targets with given architecture.
(arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)
## Count covered acrchitectures.
count_valid(arch_lkp, valid_doms)
## Count covered activities.
count_valid(act_lkp, valid_doms)
## Write multi-domain architechtures to markdown tables.
export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])
## Write domains from multi-domain architechtures to markdown tables.
export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])
## export network file.
export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])
## export network attribute file.
export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
import sys
if len(sys.argv) != 2: # the program name and one argument
sys.exit("""Parameters are read from mpf.yaml but must specify
version for data/valid_pfam_v_%(version)s.tab""")
version = sys.argv[1]
master(version)
| count_valid | identifier_name |
coverage.py | """
Script: coverage.py
Identifies domains that only occur in multi-domain proteins. The main
script is master.
--------------------
Felix A Kruger
[email protected]
"""
####
#### import modules.
####
import queryDevice
import operator
import yaml
import time
####
#### Load parameters.
####
paramFile = open('local.yaml')
params = yaml.safe_load(paramFile)
paramFile.close()
#### Define functions.
#-----------------------------------------------------------------------------------------------------------------------
def get_el_targets(params):
"""Query the ChEMBL database for (almost) all activities that are subject to the mapping. Does not conver activities expressed in log-conversion eg pIC50 etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]
"""
data = queryDevice.queryDevice("""
SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
FROM assays ass
JOIN(
SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
FROM target_dictionary td
JOIN target_components tc
ON tc.tid = td.tid
JOIN component_sequences cs
ON cs.component_id = tc.component_id
JOIN component_domains cd
ON cd.component_id = cs.component_id
WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
GROUP BY td.tid
) as dc
ON dc.tid = ass.tid
JOIN activities act
ON act.assay_id = ass.assay_id
WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
AND ass.relationship_type = 'D'
AND assay_type IN('B')
AND act.standard_relation IN('=')
AND standard_units = 'nM'
AND standard_value <= %s
GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
print "retrieved data for ", len(data), "tids."
return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
"""Read two columns from a tab-separated file into a dictionary.
Inputs:
path -- filepath
key_name -- name of the column holding the key
val_name -- name of the column holding the value
"""
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lkp = {}
els = lines[0].rstrip().split('\t')
for i, el in enumerate(els):
if el == key_name:
key_idx = i
if el == val_name:
val_idx = i
for line in lines[1:]:
elements = line.rstrip().split('\t')
lkp[elements[key_idx]] = elements[val_idx]
return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
"""Get domains for a list of tids.
Inputs:
el_targets -- list of eligible targets
"""
pfam_lkp = {}
tidstr = "', '".join(str(t) for t in tids)
data = queryDevice.queryDevice("""
SELECT tid, domain_name
FROM target_components tc
JOIN component_domains cd
ON cd.component_id = tc.component_id
JOIN domains d
ON d.domain_id = cd.domain_id
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
for ent in data:
tid = ent[0]
dom = ent[1]
try:
pfam_lkp[tid].append(dom)
except KeyError:
pfam_lkp[tid] = [dom]
return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
"""Get count of architectures and activities covered by the mapping.
"""
valz = []
for arch in lkp.keys():
valid = False
doms = arch.split(', ')
for dom in doms:
if dom in valid_doms:
valid = True
break
valz.append((lkp[arch], valid))
valid = sum([x[0] for x in valz if x[1]])
allz = sum([x[0] for x in valz])
valid_archs = len([x[0] for x in valz if x[1]])
all_archs = len(sum([x[0] for x in valz]))
out = open('data/log.tab', 'a')
timestamp = time.strftime('%d %B %Y %T', time.gmtime())
comment = "only binding assays"
release = params['release']
threshold = params['threshold']
out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
% locals())
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
'''Write out multi-domain architectures in markdown tables.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)
out = open('%s.md' % path ,'w')
out.write('|architecture|count|mapped|\n')
out.write('|:-----------|:---------|-----:|\n')
for arch in sorted_archs:
doms = str(arch[0]).split(', ')
if len(doms) <= 1:
continue
mapped = ', '.join([x for x in doms if x in valid_doms])
if len(mapped) == 0:
mapped = False
out.write("|%s|%s|%s|\n"%(arch[0], arch[1], mapped))
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures. | doms = arch.split(', ')
if len(doms) <= 1:
continue
count = arch_lkp[arch]
if type(doms) is str:
continue
for i in range(len(doms)-1):
for j in range(i+1, len(doms)):
dom_key = ', '.join(sorted([doms[i],doms[j]]))
try:
lkp[dom_key] += count
except KeyError:
lkp[dom_key] = count
out = open('%s.tab' % path ,'w')
out.write('dom_1\tdom_2\tcount\n')
for link in lkp.keys():
doms = str(link).split(', ')
out.write("%s\t%s\t%s\n"%(doms[0], doms[1], lkp[link]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
'''Write out network file.
Inputs:
arch_lkp -- dictionary of multi-domain architectures.
'''
out = open('%s.tab' % path ,'w')
out.write('dom\tvalid\n')
lkp = {}
for arch in arch_lkp.keys():
doms = arch.split(', ')
if len(doms) <= 1:
continue
for dom in doms:
valid = False
if dom in valid_doms:
valid = True
lkp[dom] = valid
for it in lkp.items():
out.write("%s\t%s\n"%(it[0], it[1]))
out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
'''Write out identified architectures in markdown tables.
Inputs:
dom_lkp -- dictionary of domains occuring in multi-domain architectures.
'''
sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)
out = open('%s.md' % path ,'w')
out.write('|domain |count| validated|\n')
out.write('|:-----------|:-----|-------:|\n')
for dom in sorted_doms:
mapped = False
count = dom[1]
dom = str(dom[0])
if dom in valid_doms:
mapped = True
out.write("|%s|%s|%s|\n"%(dom, count, mapped))
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
"""
Function: master
Run through all steps to identify mandatory muli-domain architectures.
"""
# Load the list of validated domains.
valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
valid_doms = valid_dom_d.keys()
## Load eligible targets.
el_targets = get_el_targets(params)
## Get domains for tids.
pfam_lkp = get_doms([x[0] for x in el_targets], params)
## Add targets with given architecture.
(arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)
## Count covered acrchitectures.
count_valid(arch_lkp, valid_doms)
## Count covered activities.
count_valid(act_lkp, valid_doms)
## Write multi-domain architechtures to markdown tables.
export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])
## Write domains from multi-domain architechtures to markdown tables.
export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])
## export network file.
export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])
## export network attribute file.
export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
import sys
if len(sys.argv) != 2: # the program name and one argument
sys.exit("""Parameters are read from mpf.yaml but must specify
version for data/valid_pfam_v_%(version)s.tab""")
version = sys.argv[1]
master(version) | '''
lkp = {}
for arch in arch_lkp.keys(): | random_line_split |
fapui-accordion.js | /**
* UI组件
* @module widgets
*/
/**
*定义FAPUI.Layout.AccordionLayout布局组件
*<p>以下代码将演示如何使用AccordionLayout组件</p>
define(function(require) {
require("jquery");
require("widgets/fapui-accordion");
require("widgets/fapui-panel");
$(function () {
var a = new FAPUI.Layout.AccordionLayout({
renderTo: "ab",//要渲染的位置
width: 200,//指定宽度
height: 250,//指定高度
items: [new FAPUI.Panel({ //内部元素
border: false, //icons:"icon-edit",
itemId: "itemd_example1",
title: "title_example1",
html: "text_example1"
}), new FAPUI.Panel({
border: false,
itemId: "itemd_example2",
title: "title_example2",
html: "text_example2"
}), new FAPUI.Panel({
border: false,
itemId: "itemd_example3",
title: "title_example3",
html: "text_example3"
}), new FAPUI.Panel({
border: false,
itemId: "itemd_example4",
title: "title_example4",
html: "text_example4"
})]
})
})
})
* @class FAPUI.Layout.AccordionLayout
* @extends FAPUI.Component
*/
define ( function ( require, exports, module ) {
var importcss = "./themes/" + FAPUI.getTheme () + "/css/ui-accordion.css";
require.async ( importcss );
require ( "./fapui-component" );
FAPUI.define ( "FAPUI.Layout.AccordionLayout", {
extend : "FAPUI.Component",
props : {
/**
* 内部变量
* 手风琴布局中当前激活的内容的索引
* @private
*/
activeIndex : 0,
/**
* 宽度
* @property width
* @type Num
* @default 默认值为200
*/
width : 200,
/**
* 高度
* @property height
* @type Num
* @default 默认值为400
*/
height : 400,
/**
* 子组件集合
* @property items
* @type Array
*/
items : [],
/**
* 是否显示边框,true时为显示,false时为不显示
* @property border
* @type Boolean
* @default 默认值为true
*/
border : true,
/**
* 创建自定义的动画
* @property animate
* @type Boolean
* @default 默认值为false
*/
animate : false
},
override : {
/**
* 初始化配置
* @private
*/
initConfig : function () {
this.callParent ();
this.addEvents ( /**
* 标签点击的时候触发,同时触发的事件可能有collapse或者expand
* @event itemclick
* @param comp {Object} AccordionLayout组件本身
* @param index {int} 面板索引
* @param itemId {String} 当前点击面板的itemId属性
* @param title {String} 当前点击面板的title属性
* @param event {event} 事件对象
*/
"itemclick",
/**
* 面板关闭事件
* @event collapse
* @param comp {Object} AccordionLayout组件本身
* @param index {int} 面板索引
* @param itemId {String} 当前点击面板的itemId属性
* @param title {String} 当前点击面板的title属性
*/
"collapse",
/**
* 面板展开事件
* @event expand
* @param comp {Object} AccordionLayout组件本身
* @param index {int} 面板索引
* @param itemId {String} 当前点击面板的itemId属性
* @param title {String} 当前点击面板的title属性
*/
"expand" );
this.tpl = [
"{@if it.itemId!=null}",
"<div itemId=\"${it.itemId}\" index=\"${it.index}\" class=\"accordion\">",
"{@else}",
"<div class=\"accordion\" index=\"${it.index}\">",
"{@/if}", "{@if it.index==0}",
"<div class=\"accordion-title accordion-title-noborder\" onselectstart=\"return false\">",
"{@else}", "<div class=\"accordion-title\" onselectstart=\"return false\">",
"{@/if}", "{@if it.icons}", "<span class=\"accordion-icon ${it.icons}\"></span>",
"{@/if}", "<span>${it.title}</span>",
"<span class=\"accordion-tool accordion-collapse\"> </span>",
"</div>", "{@if it.index==0}",
"<div class=\"accordion-content accordion-content-first-default\" ></div>",
"{@else}",
"<div class=\"accordion-content\"></div>",
"{@/if}", "</div>" ].join ( "" );
juicer.set ( "cache", true );
juicer.set ( "errorhandling", false );
juicer.set ( "strip", true );
juicer.set ( "detection", false );
},
/**
*
* @param {*} el
*/
render : function ( el ) {
if ( ! FAPUI.isString ( el ) ) {
el.addClass ( "panel-noscroll" );
}
this.callParent ( [ el ] );
},
/**
*
*/
createDom : function () {
var me = this;
me._items = me.items.slice ( 0 );
me.id = me.id || FAPUI.getId ();
var html = [];
var divClass = "accordion-container";
if ( ! me.border ) {
divClass = divClass + " accordion-container-noborder";
}
html.push ( "<div id=\"" + me.id + "\" class=\"" + divClass + "\">" );
var cfg = {};
$ ( me._items ).each ( function ( i ) {
this.index = i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
html.push ( "</div>" );
me.items = this._jsonToWidget ();
return html.join ( "" );
},
/**
*
*/
bindEvent : function () {
var me = this;
me.el = me.el || $ ( "#" + me.id );
me.el.click ( function ( event ) {
var target = $ ( event.target );
if ( target.parent ().is ( "div.accordion-title" ) ) {
target = target.parent ();
}
if ( target.is ( "div.accordion-title" ) ) {
var index = parseInt ( target.parent ().attr ( "index" ), 10 );
me.fireEvent ( "itemclick", me, index, target.parent ().attr ( "itemId" ), event );
me.expand ( index );
}
} );
me.afterRender ();
},
/**
*
*/
afterRender : function () {
var me = this;
var i = me.activeIndex || 0;
me.activeIndex = - 1;
me.expand ( i );
},
/**
*
*/
updateRender : function () {
this.callParent ();
},
/**
* 重新计算容器内子组件的宽和高,调用setHeight和setWidth方法后都会执行此方法
* @method doLayout
*/
doLayout : function () {
var me = this;
me.setHeight ( me.height );
me.setWidth ( me.width );
},
/**
*
* @param {*} h
*/
doLayoutH : function ( h ) {
var me = this;
//计算内容区域的高度
var items = this.el.children ();
var heightSum = 0;
$ ( items ).each ( function () {
heightSum = heightSum + $ ( "div:first", this ).outerHeight ();
} );
h = h - heightSum;
var _itemHeight = h;
$ ( items ).each ( function () {
$ ( this ).children ( "div.accordion-content" ).height ( h );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setHeight ( _itemHeight );
}
} );
},
/**
*
* @param {*} w
*/
doLayoutW : function ( w ) {
var me = this;
var items = this.el.children ();
$ ( items ).each ( function () {
$ ( $ ( this ).children ( "div.accordion-title" ) ).width ( w - 5 );
$ ( $ ( this ).children ( "div.accordion-content" ) ).width ( w );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setWidth ( me.width );
}
} );
},
/**
* 设置高
* @method setHeight
* @param {num} h
*/
setHeight : function ( h ) {
this.height = h;
this.el.height ( h );
this.doLayoutH ( h );
},
/**
* 设置宽
* @method setWidth
* @param {num} w
*/
setWidth : function ( w ) {
this.width = w;
this.el.width ( w );
this.doLayoutW ( w );
},
/**
* 得到AccordionLayout的高度
* @method getHeight
* @return {Num}
*/
getHeight : function () {
return this.height;
},
/**
* 得到AccordionLayout的宽度
* @method getWidth
* @return {Num}
*/
getWidth : function () {
return this.width;
},
/**
* 展开子组件。如果index为数字,则展开items中的第index组件;如果index为String,则展开子组件的itemId等于index的组件
* @method expand
* @param {string} index
*/
expand : function ( index ) {
var me = this;
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId='" + index + "']" )[ 0 ] ).attr ( "index" );
index = parseInt ( index , 10 );
}
if ( index !== null && this.activeIndex !== index && me.items.length > 0 ) {
me.collapse ( this.activeIndex );
var contentArea = $ ( $ ( this.el ).
children ( "div[index=" + index + "]" )[ 0 ] ).
children ( "div.accordion-content" );
me._renderWidget ( index, contentArea );
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
headEl.addClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).addClass ( "accordion-expand" );
if ( me.animate === true ) {
contentEl.slideDown ( "normal" );
} else {
contentEl.show ();
}
this.items[ index ].setWidth ( contentArea.innerWidth () );
this.items[ index ].setHeight ( contentArea.innerHeight () );
this.fireEvent ( "expand", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = index;
}
},
/**
* 关闭子组件。如果index为数字,则关闭items中的第index组件;如果index为String,则关闭子组件的itemId等于index的组件
* @method collapse
* @param {string} index
*/
collapse : function ( index ) {
var me = this;
if ( index == - 1 ) {
return;
}//如果index为-1则说明所有的选项都关闭了
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] ).attr ( "index" );
index = parseInt ( index );
}
if ( index !== null && this.activeIndex ==index && me.items.length > 0 ) {
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
if ( me.animate === true ) {
contentEl.slideUp ( "normal" );
} else {
contentEl.hide ();
}
contentEl.removeClass ( "accordion-content-first-default" );
headEl.removeClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).removeClass ( "accordion-expand" );
this.fireEvent ( "collapse", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = - 1;
}
},
/**
* 添加子组件
* @method addItems
* @param {Array} items 需要添加组件集合
*/
addItems : function ( items ) {
var me = this;
if ( ! FAPUI.isArray ( items ) ) {
items = [ items ];
}
var cfg = {};
var html = [];
$ ( items ).each ( function ( i ) {
this.index = me.items.length + i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
me.el.append ( html.join ( "" ) );
me._items = me._items.concat ( items );
me.items = me.items.concat ( me._jsonToWidget ( items ) );
me.doLayout ();
},
/**
* 移除子组件。如果index为数字,则移除items中的第index组件;如果index为String,则移除子组件的itemId等于index的组件
* @method removeItem
* @param {string} index | */
removeItem : function ( index ) {
var me = this;
var comp;
if ( FAPUI.isNumber ( index ) ) {
comp = $ ( me.el.children ( "div[index=\"" + index + "\"]" )[ 0 ] );
} else {
comp = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] );
index = parseInt ( comp.attr ( "index" ) );
}
if ( comp[ 0 ] === null ) {
return;
}
var siblings = comp.siblings ();
siblings.each ( function () {
var i = parseInt ( $ ( this ).attr ( "index" ) );
if ( i > index ) {
$ ( this ).attr ( "index", i - 1 );
}
} );
if ( me.activeIndex > index ) {
me.activeIndex = me.activeIndex - 1;
} else if ( me.activeIndex == index ) {
me.activeIndex = - 1;
}
comp.unbind ();
comp.remove ();
me.items.splice ( index, 1 );
me._items.splice ( index, 1 );
//删除后重新设置内容的高度
me.setHeight ( me.height );
},
/**
* @access private
* @param {*} items 把items转换成组件
*/
_jsonToWidget : function ( items ) {
items = items || this.items;
var newItems = [];
if ( items !== null && items.length > 0 ) {
$ ( items ).each ( function ( index ) {
var me = this;
var o = {};
FAPUI.apply ( o, me );
delete o.title;
o.isRender = false;
if ( me.isUI && me.isUI () ) {
newItems.push ( o );
} else {
var cmp = FAPUI.create ( me );
newItems.push ( cmp );
}
} );
}
return newItems;
},
/**
* 渲染组件
* @private
*/
_renderWidget : function ( index, contentArea ) {
if ( this.items && this.items[ index ] && ! this.items[ index ].isRender ) {
this.items[ index ].render ( contentArea );
this.items[ index ].isRender = true;
}
},
/**
*
*/
onDestroy : function () {
var me = this;
if ( me.items ) {
$ ( me.items ).each ( function () {
var that = this;
that.destroy ();
} );
}
me.callParent ();
}
}
} );
FAPUI.register ( "accordionLayout", FAPUI.Layout.AccordionLayout );
return FAPUI.Layout.AccordionLayout;
} ); | random_line_split |
|
fapui-accordion.js | /**
* UI组件
* @module widgets
*/
/**
*定义FAPUI.Layout.AccordionLayout布局组件
*<p>以下代码将演示如何使用AccordionLayout组件</p>
define(function(require) {
require("jquery");
require("widgets/fapui-accordion");
require("widgets/fapui-panel");
$(function () {
var a = new FAPUI.Layout.AccordionLayout({
renderTo: "ab",//要渲染的位置
width: 200,//指定宽度
height: 250,//指定高度
items: [new FAPUI.Panel({ //内部元素
border: false, //icons:"icon-edit",
itemId: "itemd_example1",
title: "title_example1",
html: "text_example1"
}), new FAPUI.Panel({
border: false,
itemId: "itemd_example2",
title: "title_example2",
html: "text_example2"
}), new FAPUI.Panel({
border: false,
itemId: "itemd_example3",
title: "title_example3",
html: "text_example3"
}), new FAPUI.Panel({
border: false,
itemId: "itemd_example4",
title: "title_example4",
html: "text_example4"
})]
})
})
})
* @class FAPUI.Layout.AccordionLayout
* @extends FAPUI.Component
*/
define ( function ( require, exports, module ) {
var importcss = "./themes/" + FAPUI.getTheme () + "/css/ui-accordion.css";
require.async ( importcss );
require ( "./fapui-component" );
FAPUI.define ( "FAPUI.Layout.AccordionLayout", {
extend : "FAPUI.Component",
props : {
/**
* 内部变量
* 手风琴布局中当前激活的内容的索引
* @private
*/
activeIndex : 0,
/**
* 宽度
* @property width
* @type Num
* @default 默认值为200
*/
width : 200,
/**
* 高度
* @property height
* @type Num
* @default 默认值为400
*/
height : 400,
/**
* 子组件集合
* @property items
* @type Array
*/
items : [],
/**
* 是否显示边框,true时为显示,false时为不显示
* @property border
* @type Boolean
* @default 默认值为true
*/
border : true,
/**
* 创建自定义的动画
* @property animate
* @type Boolean
* @default 默认值为false
*/
animate : false
},
override : {
/**
* 初始化配置
* @private
*/
initConfig : function () {
this.callParent ();
this.addEvents ( /**
* 标签点击的时候触发,同时触发的事件可能有collapse或者expand
* @event itemclick
* @param comp {Object} AccordionLayout组件本身
* @param index {int} 面板索引
* @param itemId {String} 当前点击面板的itemId属性
* @param title {String} 当前点击面板的title属性
* @param event {event} 事件对象
*/
"itemclick",
/**
* 面板关闭事件
* @event collapse
* @param comp {Object} AccordionLayout组件本身
* @param index {int} 面板索引
* @param itemId {String} 当前点击面板的itemId属性
* @param title {String} 当前点击面板的title属性
*/
"collapse",
/**
* 面板展开事件
* @event expand
* @param comp {Object} AccordionLayout组件本身
* @param index {int} 面板索引
* @param itemId {String} 当前点击面板的itemId属性
* @param title {String} 当前点击面板的title属性
*/
"expand" );
this.tpl = [
"{@if it.itemId!=null}",
"<div itemId=\"${it.itemId}\" index=\"${it.index}\" class=\"accordion\">",
"{@else}",
"<div class=\"accordion\" index=\"${it.index}\">",
"{@/if}", "{@if it.index==0}",
"<div class=\"accordion-title accordion-title-noborder\" onselectstart=\"return false\">",
"{@else}", "<div class=\"accordion-title\" onselectstart=\"return false\">",
"{@/if}", "{@if it.icons}", "<span class=\"accordion-icon ${it.icons}\"></span>",
"{@/if}", "<span>${it.title}</span>",
"<span class=\"accordion-tool accordion-collapse\"> </span>",
"</div>", "{@if it.index==0}",
"<div class=\"accordion-content accordion-content-first-default\" ></div>",
"{@else}",
"<div class=\"accordion-content\"></div>",
"{@/if}", "</div>" ].join ( "" );
juicer.set ( "cache", true );
juicer.set ( "errorhandling", false );
juicer.set ( "strip", true );
juicer.set ( "detection", false );
},
/**
*
* @param {*} el
*/
render : function ( el ) {
if ( ! FAPUI.isString ( el ) ) {
el.addClass ( "panel-noscroll" );
}
this.callParent ( [ el ] );
},
/**
*
*/
createDom : function () {
var me = this;
me._items = me.items.slice ( 0 );
me.id = me.id || FAPUI.getId ();
var html = [];
var divClass = "accordion-container";
if ( ! me.border ) {
divClass = divClass + " accordion-container-noborder";
}
html.push ( "<div id=\"" + me.id + "\" class=\"" + divClass + "\">" );
var cfg = {};
$ ( me._items ).each ( function ( i ) {
this.index = i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
html.push ( "</div>" );
me.items = this._jsonToWidget ();
return html.join ( "" );
},
/**
*
*/
bindEvent : function () {
var me = this;
me.el = me.el || $ ( "#" + me.id );
me.el.click ( function ( event ) {
var target = $ ( event.target );
if ( target.parent ().is ( "div.accordion-title" ) ) {
target = target.parent ();
}
if ( target.is ( "div.accordion-title" ) ) {
var index = parseInt ( target.parent ().attr ( "index" ), 10 );
me.fireEvent ( "itemclick", me, index, target.parent ().attr ( "itemId" ), event );
me.expand ( index );
}
} );
me.afterRender ();
},
/**
*
*/
afterRender : function () {
var me = this;
var i = me.activeIndex || 0;
me.activeIndex = - 1;
me.expand ( i );
},
/**
*
*/
updateRender : function () {
this.callParent ();
},
/**
* 重新计算容器内子组件的宽和高,调用setHeight和setWidth方法后都会执行此方法
* @method doLayout
*/
doLayout : function () {
var me = this;
me.setHeight ( me.height );
me.setWidth ( me.width );
},
/**
*
* @param {*} h
*/
doLayoutH : function ( h ) {
var me = this;
//计算内容区域的高度
var items = this.el.children ();
var heightSum = 0;
$ ( items ).each ( function () {
heightSum = heightSum + $ ( "div:first", this ).outerHeight ();
} );
h = h - heightSum;
var _itemHeight = h;
$ ( items ).each ( function () {
$ ( this ).children ( "div.accordion-content" ).height ( h );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setHeight ( _itemHeight );
}
} );
},
/**
*
* @param {*} w
*/
doLayoutW : function ( w ) {
var me = this;
var items = this.el.children ();
$ ( items ).each ( function () {
$ ( $ ( this ).children ( "div.accordion-title" ) ).width ( w - 5 );
$ ( $ ( this ).children ( "div.accordion-content" ) ).width ( w );
} );
$ ( me.items ).each ( function () {
var that = this;
if ( that.isRender ) {
that.setWidth ( me.width );
}
} );
},
/**
* 设置高
* @method setHeight
* @param {num} h
*/
setHeight : function ( h ) {
this.height = h;
this.el.height ( h );
this.doLayoutH ( h );
},
/**
* 设置宽
* @method setWidth
* @param {num} w
*/
setWidth : function ( w ) {
this.width = w;
this.el.width ( w );
this.doLayoutW ( w );
},
/**
* 得到AccordionLayout的高度
* @method getHeight
* @return {Num}
*/
getHeight : function () {
return this.height;
},
/**
* 得到AccordionLayout的宽度
* @method getWidth
* @return {Num}
*/
getWidth : function () {
return this.width;
},
/**
* 展开子组件。如果index为数字,则展开items中的第index组件;如果index为String,则展开子组件的itemId等于index的组件
* @method expand
* @param {string} index
*/
expand : function ( index ) {
var me = this;
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId='" + index + "']" )[ 0 ] ).attr ( "index" );
index = parseInt ( index , 10 );
}
if ( index !== null && this.activeIndex !== index && me.items.length > 0 ) {
me.collapse ( this.activeIndex );
var contentArea = $ ( $ ( this.el ).
children ( "div[index=" + index + "]" )[ 0 ] ).
children ( "div.accordion-content" );
me._renderWidget ( index, contentArea );
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
headEl.addClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).addClass ( "accordion-expand" );
if ( me.animate === true ) {
contentEl.slideDown ( "normal" );
} else {
contentEl.show ();
}
this.items[ index ].setWidth ( contentArea.innerWidth () );
this.items[ index ].setHeight ( contentArea.innerHeight () );
this.fireEvent ( "expand", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = index;
}
},
/**
* 关闭子组件。如果index为数字,则关闭items中的第index组件;如果index为String,则关闭子组件的itemId等于index的组件
* @method collapse
* @param {string} index
*/
collapse : function ( index ) {
var me = this;
if ( index == - 1 ) {
return;
}//如果index为-1则说明所有的选项都关闭了
if ( ! FAPUI.isNumber ( index ) ) {
index = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] ).attr ( "index" );
index = parseInt ( index );
}
if ( index !== null && this.activeIndex ==index && me.items.length > 0 ) {
var contentEl = me.items[ index ].el.parent ();
var headEl = contentEl.prev ();
if ( me.animate === true ) {
contentEl.slideUp ( "normal" );
} else {
contentEl.hide ();
}
contentEl.removeClass ( "accordion-content-first-default" );
headEl.removeClass ( "accordion-title-active" );
$ ( "span.accordion-tool", headEl ).removeClass ( "accordion-expand" );
this.fireEvent ( "collapse", this, index, this._items[ index ].itemId, this._items[ index ].title );
this.activeIndex = - 1;
}
},
/**
* 添加子组件
* @method addItems
* @param {Array} items 需要添加组件集合
*/
addItems : function ( items ) {
var me = this;
if ( ! FAPUI.isArray ( items ) ) {
items = [ items ];
}
var cfg = {};
var html = [];
$ ( items ).each ( function ( i ) {
this.index = me.items.length + i;
cfg.it = this;
html.push ( juicer ( me.tpl, cfg ) );
} );
me.el.append ( html.join ( "" ) );
me._items = me._items.concat ( items );
me.items = me.items.concat ( me._jsonToWidget ( items ) );
me.doLayout ();
},
/**
* 移除子组件。如果index为数字,则移除items中的第index组件;如果index为String,则移除子组件的itemId等于index的组件
* @method removeItem
* @param {string} index
*/
removeItem : function ( index ) {
var me = this;
var comp;
if ( FAPUI.isNumber ( index ) ) {
comp = $ ( me.el.children ( "div[index=\"" + index + "\"]" )[ 0 ] );
} else {
comp = $ ( me.el.children ( "div[itemId=\"" + index + "\"]" )[ 0 ] );
index = parseInt ( comp.attr ( "index" ) );
}
if ( comp[ 0 ] === null ) {
return;
}
var siblings = comp.siblings ();
siblings.each ( function () {
var i = parseInt ( $ ( this ).attr ( "index" ) );
if ( i > index ) {
$ ( this ).attr ( "index", i - 1 );
}
} );
if ( me.activeIndex > index ) {
me.activeIndex = me.activeIndex - 1;
} else if ( me.activeIndex == index ) {
me.activeIndex = - 1;
}
comp.unbind ();
comp.remove ();
me.items.splice ( index, 1 );
me._items.splice ( index, 1 );
//删除后重新设置内容的高度
me.setHeight ( me.height );
},
/**
* @access private
* @param {*} items 把items转换成组件
*/
_jsonToWidget : function ( items ) {
items = items || this.items;
var newItems = [];
if ( items !== null && items.length > 0 ) {
$ ( items ).each ( function ( index ) {
var me = this;
var o = {};
FAPUI.apply ( o, me );
delete o.title;
o.isRender = false;
if ( me.isUI && me.isUI () ) {
newItems.push ( o );
} else {
var cmp = FAPUI.create ( me );
newItems.push ( cmp );
}
} );
}
return newItems;
},
/**
* 渲染组件
* @private
*/
_renderWidget : function ( index, contentArea ) {
if ( this.items && this.items[ index ] && ! this.items[ index ].isRender ) {
this.items[ index ].render ( contentArea );
this.items[ index ].isRender = true;
}
},
/**
*
*/
onDestroy : function () {
var me = this;
if ( me.items ) {
$ ( me.items ).each ( function () {
var that = this;
that.destroy ();
} );
}
me.callParent ();
}
}
} );
FAPUI.register ( "accordionLayout", FAPUI.Layout.AccordionLayout );
return FAPUI.Layout.AccordionLayout;
} ); | conditional_block |
||
stoopid.py |
import torch
from typing import Tuple
from math import floor
class VisualQNetwork(torch.nn.Module):
def __init__(
self,
input_shape: Tuple[int, int, int],
encoding_size: int,
output_size: int
):
"""
Creates a neural network that takes as input a batch of images (3
dimensional tensors) and outputs a batch of outputs (1 dimensional
tensors)
"""
super(VisualQNetwork, self).__init__()
height = input_shape[0]
width = input_shape[1]
initial_channels = input_shape[2]
conv_1_hw = self.conv_output_shape((height, width), 8, 4)
conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2)
self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32
self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
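# Hypothetical shape check (not part of the original notebook): a GridWorld
# visual observation is 64 x 84 x 3, and the network maps a batch of such
# observations to one row of Q-values per action. Uncomment to verify:
#
#   _sanity_net = VisualQNetwork((64, 84, 3), encoding_size=126, output_size=5)
#   _sanity_out = _sanity_net(torch.zeros((4, 64, 84, 3)))
#   assert _sanity_out.shape == (4, 5)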
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
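# For illustration (assumed example, not from the original notebook): a single
# Experience is a plain record, and a Buffer is just a Python list of them, e.g.
#
#   exp = Experience(
#       obs=np.zeros((64, 84, 3), dtype=np.float32),
#       action=np.array([2]),
#       reward=1.0,
#       done=False,
#       next_obs=np.zeros((64, 84, 3), dtype=np.float32),
#   )
#   buffer: Buffer = [exp]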
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
    :param env: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
    :param epsilon: Will add a random normal variable with standard deviation
    epsilon to the value heads of the Q-Network to encourage exploration.
    :returns: a Tuple containing the created buffer and the average cumulative
    reward the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
    # Create a Mapping from AgentId to the last action of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
        # Create its last experience (it is the last one because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
|
"""### Run Training"""
# Commented out IPython magic to ensure Python compatibility.
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
from mlagents_envs.environment import UnityEnvironment
import matplotlib.pyplot as plt
# %matplotlib inline
# Create the GridWorld Environment from the registry
env = default_registry["GridWorld"].make()
print("GridWorld environment created.")
# Create a new Q-Network.
qnet = VisualQNetwork((64, 84, 3), 126, 5)
experiences: Buffer = []
optim = torch.optim.Adam(qnet.parameters(), lr= 0.001)
cumulative_rewards: List[float] = []
# The number of training steps that will be performed
NUM_TRAINING_STEPS = 70
# The number of experiences to collect per training step
NUM_NEW_EXP = 1000
# The maximum size of the Buffer
BUFFER_SIZE = 10000
for n in range(NUM_TRAINING_STEPS):
new_exp,_ = Trainer.generate_trajectories(env, qnet, NUM_NEW_EXP, epsilon=0.1)
random.shuffle(experiences)
if len(experiences) > BUFFER_SIZE:
experiences = experiences[:BUFFER_SIZE]
experiences.extend(new_exp)
Trainer.update_q_net(qnet, optim, experiences, 5)
_, rewards = Trainer.generate_trajectories(env, qnet, 100, epsilon=0)
cumulative_rewards.append(rewards)
print("Training step ", n+1, "\treward ", rewards)
env.close()
# Show the training graph
plt.plot(range(NUM_TRAINING_STEPS), cumulative_rewards)
| """
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bellman equation to update the Q-Network
target = (
reward
+ (1.0 - done)
* GAMMA
* torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
)
mask = torch.zeros((len(batch), action_size))
mask.scatter_(1, action, 1)
prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True)
criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
# Perform the backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step() | identifier_body |
stoopid.py |
import torch
from typing import Tuple
from math import floor
class VisualQNetwork(torch.nn.Module):
def __init__(
self,
input_shape: Tuple[int, int, int],
encoding_size: int,
output_size: int
):
"""
Creates a neural network that takes as input a batch of images (3
dimensional tensors) and outputs a batch of outputs (1 dimensional
tensors)
"""
super(VisualQNetwork, self).__init__()
height = input_shape[0]
width = input_shape[1]
initial_channels = input_shape[2]
conv_1_hw = self.conv_output_shape((height, width), 8, 4)
conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2)
self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32
self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param BaseEnv: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation.
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
"""
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bellman equation to update the Q-Network
target = (
reward
+ (1.0 - done)
* GAMMA
* torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
)
mask = torch.zeros((len(batch), action_size))
mask.scatter_(1, action, 1)
prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True)
criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
# Perform the backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
"""### Run Training"""
# Commented out IPython magic to ensure Python compatibility.
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
from mlagents_envs.environment import UnityEnvironment
import matplotlib.pyplot as plt
# %matplotlib inline
# Create the GridWorld Environment from the registry
env = default_registry["GridWorld"].make()
print("GridWorld environment created.")
# Create a new Q-Network.
qnet = VisualQNetwork((64, 84, 3), 126, 5)
experiences: Buffer = []
optim = torch.optim.Adam(qnet.parameters(), lr= 0.001)
cumulative_rewards: List[float] = []
# The number of training steps that will be performed
NUM_TRAINING_STEPS = 70
# The number of experiences to collect per training step
NUM_NEW_EXP = 1000
# The maximum size of the Buffer
BUFFER_SIZE = 10000
for n in range(NUM_TRAINING_STEPS):
|
env.close()
# Show the training graph
plt.plot(range(NUM_TRAINING_STEPS), cumulative_rewards)
| new_exp,_ = Trainer.generate_trajectories(env, qnet, NUM_NEW_EXP, epsilon=0.1)
random.shuffle(experiences)
if len(experiences) > BUFFER_SIZE:
experiences = experiences[:BUFFER_SIZE]
experiences.extend(new_exp)
Trainer.update_q_net(qnet, optim, experiences, 5)
_, rewards = Trainer.generate_trajectories(env, qnet, 100, epsilon=0)
cumulative_rewards.append(rewards)
print("Training step ", n+1, "\treward ", rewards) | conditional_block |
stoopid.py |
import torch
from typing import Tuple
from math import floor
class VisualQNetwork(torch.nn.Module):
def __init__(
self,
input_shape: Tuple[int, int, int],
encoding_size: int,
output_size: int
):
"""
Creates a neural network that takes as input a batch of images (3
dimensional tensors) and outputs a batch of outputs (1 dimensional
tensors)
"""
super(VisualQNetwork, self).__init__()
height = input_shape[0]
width = input_shape[1]
initial_channels = input_shape[2]
conv_1_hw = self.conv_output_shape((height, width), 8, 4)
conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2)
self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32
self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def | (
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param BaseEnv: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation.
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name)
# For all Agents with a Terminal Step:
for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
"""
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bellman equation to update the Q-Network
target = (
reward
+ (1.0 - done)
* GAMMA
* torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
)
mask = torch.zeros((len(batch), action_size))
mask.scatter_(1, action, 1)
prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True)
criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
# Perform the backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
"""### Run Training"""
# Commented out IPython magic to ensure Python compatibility.
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
from mlagents_envs.environment import UnityEnvironment
import matplotlib.pyplot as plt
# %matplotlib inline
# Create the GridWorld Environment from the registry
env = default_registry["GridWorld"].make()
print("GridWorld environment created.")
# Create a new Q-Network.
qnet = VisualQNetwork((64, 84, 3), 126, 5)
experiences: Buffer = []
optim = torch.optim.Adam(qnet.parameters(), lr= 0.001)
cumulative_rewards: List[float] = []
# The number of training steps that will be performed
NUM_TRAINING_STEPS = 70
# The number of experiences to collect per training step
NUM_NEW_EXP = 1000
# The maximum size of the Buffer
BUFFER_SIZE = 10000
for n in range(NUM_TRAINING_STEPS):
new_exp,_ = Trainer.generate_trajectories(env, qnet, NUM_NEW_EXP, epsilon=0.1)
random.shuffle(experiences)
if len(experiences) > BUFFER_SIZE:
experiences = experiences[:BUFFER_SIZE]
experiences.extend(new_exp)
Trainer.update_q_net(qnet, optim, experiences, 5)
_, rewards = Trainer.generate_trajectories(env, qnet, 100, epsilon=0)
cumulative_rewards.append(rewards)
print("Training step ", n+1, "\treward ", rewards)
env.close()
# Show the training graph
plt.plot(range(NUM_TRAINING_STEPS), cumulative_rewards)
| generate_trajectories | identifier_name |
stoopid.py | import torch
from typing import Tuple
from math import floor
class VisualQNetwork(torch.nn.Module):
def __init__(
self,
input_shape: Tuple[int, int, int],
encoding_size: int,
output_size: int
):
"""
Creates a neural network that takes as input a batch of images (3
dimensional tensors) and outputs a batch of outputs (1 dimensional
tensors)
"""
super(VisualQNetwork, self).__init__()
height = input_shape[0]
width = input_shape[1]
initial_channels = input_shape[2]
conv_1_hw = self.conv_output_shape((height, width), 8, 4)
conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2)
self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32
self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4])
self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2])
self.dense1 = torch.nn.Linear(self.final_flat, encoding_size)
self.dense2 = torch.nn.Linear(encoding_size, output_size)
def forward(self, visual_obs: torch.tensor):
visual_obs = visual_obs.permute(0, 3, 1, 2)
conv_1 = torch.relu(self.conv1(visual_obs))
conv_2 = torch.relu(self.conv2(conv_1))
hidden = self.dense1(conv_2.reshape([-1, self.final_flat]))
hidden = torch.relu(hidden)
hidden = self.dense2(hidden)
return hidden
@staticmethod
def conv_output_shape(
h_w: Tuple[int, int],
kernel_size: int = 1,
stride: int = 1,
pad: int = 0,
dilation: int = 1,
):
"""
Computes the height and width of the output of a convolution layer.
"""
h = floor(
((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
w = floor(
((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1
)
return h, w
"""We will now create a few classes to help us store the data we will use to train the Q-Learning algorithm."""
import numpy as np
from typing import NamedTuple, List
class Experience(NamedTuple):
"""
An experience contains the data of one Agent transition.
- Observation
- Action
- Reward
- Done flag
- Next Observation
"""
obs: np.ndarray
action: np.ndarray
reward: float
done: bool
next_obs: np.ndarray
# A Trajectory is an ordered sequence of Experiences
Trajectory = List[Experience]
# A Buffer is an unordered list of Experiences from multiple Trajectories
Buffer = List[Experience]
"""Now, we can create our trainer class. The role of this trainer is to collect data from the Environment according to a Policy, and then train the Q-Network with that data."""
from mlagents_envs.environment import ActionTuple, BaseEnv
from typing import Dict
import random
class Trainer:
@staticmethod
def generate_trajectories(
env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float
):
"""
Given a Unity Environment and a Q-Network, this method will generate a
buffer of Experiences obtained by running the Environment with the Policy
derived from the Q-Network.
:param BaseEnv: The UnityEnvironment used.
:param q_net: The Q-Network used to collect the data.
:param buffer_size: The minimum size of the buffer this method will return.
:param epsilon: Will add a random normal variable with standard deviation.
epsilon to the value heads of the Q-Network to encourage exploration.
:returns: a Tuple containing the created buffer and the average cumulative
the Agents obtained.
"""
# Create an empty Buffer
buffer: Buffer = []
# Reset the environment
env.reset()
# Read and store the Behavior Name of the Environment
behavior_name = list(env.behavior_specs)[0]
# Read and store the Behavior Specs of the Environment
spec = env.behavior_specs[behavior_name]
# Create a Mapping from AgentId to Trajectories. This will help us create
# trajectories for each Agents
dict_trajectories_from_agent: Dict[int, Trajectory] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_obs_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to the last observation of the Agent
dict_last_action_from_agent: Dict[int, np.ndarray] = {}
# Create a Mapping from AgentId to cumulative reward (Only for reporting)
dict_cumulative_reward_from_agent: Dict[int, float] = {}
# Create a list to store the cumulative rewards obtained so far
cumulative_rewards: List[float] = []
while len(buffer) < buffer_size: # While not enough data in the buffer
# Get the Decision Steps and Terminal Steps of the Agents
decision_steps, terminal_steps = env.get_steps(behavior_name) | for agent_id_terminated in terminal_steps:
# Create its last experience (is last because the Agent terminated)
last_experience = Experience(
obs=dict_last_obs_from_agent[agent_id_terminated].copy(),
reward=terminal_steps[agent_id_terminated].reward,
done=not terminal_steps[agent_id_terminated].interrupted,
action=dict_last_action_from_agent[agent_id_terminated].copy(),
next_obs=terminal_steps[agent_id_terminated].obs[0],
)
# Clear its last observation and action (Since the trajectory is over)
dict_last_obs_from_agent.pop(agent_id_terminated)
dict_last_action_from_agent.pop(agent_id_terminated)
# Report the cumulative reward
cumulative_reward = (
dict_cumulative_reward_from_agent.pop(agent_id_terminated)
+ terminal_steps[agent_id_terminated].reward
)
cumulative_rewards.append(cumulative_reward)
# Add the Trajectory and the last experience to the buffer
buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated))
buffer.append(last_experience)
# For all Agents with a Decision Step:
for agent_id_decisions in decision_steps:
# If the Agent does not have a Trajectory, create an empty one
if agent_id_decisions not in dict_trajectories_from_agent:
dict_trajectories_from_agent[agent_id_decisions] = []
dict_cumulative_reward_from_agent[agent_id_decisions] = 0
# If the Agent requesting a decision has a "last observation"
if agent_id_decisions in dict_last_obs_from_agent:
# Create an Experience from the last observation and the Decision Step
exp = Experience(
obs=dict_last_obs_from_agent[agent_id_decisions].copy(),
reward=decision_steps[agent_id_decisions].reward,
done=False,
action=dict_last_action_from_agent[agent_id_decisions].copy(),
next_obs=decision_steps[agent_id_decisions].obs[0],
)
# Update the Trajectory of the Agent and its cumulative reward
dict_trajectories_from_agent[agent_id_decisions].append(exp)
dict_cumulative_reward_from_agent[agent_id_decisions] += (
decision_steps[agent_id_decisions].reward
)
# Store the observation as the new "last observation"
dict_last_obs_from_agent[agent_id_decisions] = (
decision_steps[agent_id_decisions].obs[0]
)
# Generate an action for all the Agents that requested a decision
# Compute the values for each action given the observation
actions_values = (
q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy()
)
# Pick the best action using argmax
print("ACTION VALS", actions_values)
actions_values += epsilon * (
np.random.randn(actions_values.shape[0], actions_values.shape[1])
).astype(np.float32)
actions = np.argmax(actions_values, axis=1)
actions.resize((len(decision_steps), 1))
# Store the action that was picked, it will be put in the trajectory later
for agent_index, agent_id in enumerate(decision_steps.agent_id):
dict_last_action_from_agent[agent_id] = actions[agent_index]
# Set the actions in the environment
# Unity Environments expect ActionTuple instances.
action_tuple = ActionTuple()
action_tuple.add_discrete(actions)
env.set_actions(behavior_name, action_tuple)
# Perform a step in the simulation
env.step()
return buffer, np.mean(cumulative_rewards)
@staticmethod
def update_q_net(
q_net: VisualQNetwork,
optimizer: torch.optim,
buffer: Buffer,
action_size: int
):
"""
Performs an update of the Q-Network using the provided optimizer and buffer
"""
BATCH_SIZE = 1000
NUM_EPOCH = 3
GAMMA = 0.9
batch_size = min(len(buffer), BATCH_SIZE)
random.shuffle(buffer)
# Split the buffer into batches
batches = [
buffer[batch_size * start : batch_size * (start + 1)]
for start in range(int(len(buffer) / batch_size))
]
for _ in range(NUM_EPOCH):
for batch in batches:
# Create the Tensors that will be fed in the network
obs = torch.from_numpy(np.stack([ex.obs for ex in batch]))
reward = torch.from_numpy(
np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1)
)
done = torch.from_numpy(
np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1)
)
action = torch.from_numpy(np.stack([ex.action for ex in batch]))
next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch]))
# Use the Bellman equation to update the Q-Network
target = (
reward
+ (1.0 - done)
* GAMMA
* torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values
)
mask = torch.zeros((len(batch), action_size))
mask.scatter_(1, action, 1)
prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True)
criterion = torch.nn.MSELoss()
loss = criterion(prediction, target)
# Perform the backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
"""### Run Training"""
# Commented out IPython magic to ensure Python compatibility.
# -----------------
# This code is used to close an env that might not have been closed before
try:
env.close()
except:
pass
# -----------------
from mlagents_envs.registry import default_registry
from mlagents_envs.environment import UnityEnvironment
import matplotlib.pyplot as plt
# %matplotlib inline
# Create the GridWorld Environment from the registry
env = default_registry["GridWorld"].make()
print("GridWorld environment created.")
# Create a new Q-Network.
qnet = VisualQNetwork((64, 84, 3), 126, 5)
experiences: Buffer = []
optim = torch.optim.Adam(qnet.parameters(), lr= 0.001)
cumulative_rewards: List[float] = []
# The number of training steps that will be performed
NUM_TRAINING_STEPS = 70
# The number of experiences to collect per training step
NUM_NEW_EXP = 1000
# The maximum size of the Buffer
BUFFER_SIZE = 10000
for n in range(NUM_TRAINING_STEPS):
new_exp,_ = Trainer.generate_trajectories(env, qnet, NUM_NEW_EXP, epsilon=0.1)
random.shuffle(experiences)
if len(experiences) > BUFFER_SIZE:
experiences = experiences[:BUFFER_SIZE]
experiences.extend(new_exp)
Trainer.update_q_net(qnet, optim, experiences, 5)
_, rewards = Trainer.generate_trajectories(env, qnet, 100, epsilon=0)
cumulative_rewards.append(rewards)
print("Training step ", n+1, "\treward ", rewards)
env.close()
# Show the training graph
plt.plot(range(NUM_TRAINING_STEPS), cumulative_rewards) |
# For all Agents with a Terminal Step: | random_line_split |
spider.py | """
Spiderplots & Density Spiderplots
==================================
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# sphinx_gallery_thumbnail_number = 4
########################################################################################
# Here we'll set up an example which uses EMORB as a starting point. Typically we'll
# normalise trace element compositions to a reference composition
# to be able to link the diagram to 'relative enrichement' occuring during geological
# processes, so here we're normalising to a Primitive Mantle composition first.
# We're here taking this normalised composition and adding some noise in log-space to
# generate multiple compositions about this mean (i.e. a compositional distribution).
# For simplicility, this is handled by
# :func:`~pyrolite.util.synthetic.example_spider_data`:
#
from pyrolite.util.synthetic import example_spider_data
normdf = example_spider_data(start="EMORB_SM89", norm_to="PM_PON")
########################################################################################
# .. seealso:: `Normalisation <../geochem/normalization.html>`__
#
########################################################################################
# Basic spider plots are straightforward to produce:
#
import pyrolite.plot
ax = normdf.pyroplot.spider(color="0.5", alpha=0.5, unity_line=True, figsize=(10, 4))
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Index Ordering
# --------------
#
# The default ordering here follows that of the dataframe columns, but we typically
# want to reorder these based on some physical ordering. A :code:`index_order` keyword
# argument can be used to supply a function which will reorder the elements before
# plotting. Here we order the elements by relative incompatibility (using
# :func:`pyrolite.geochem.ind.by_incompatibility` behind the scenes):
from pyrolite.geochem.ind import by_incompatibility
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Similarly, you can also rearrange elements to be in order of atomic number:
#
from pyrolite.geochem.ind import by_number
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="number",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Color Mapping
# -------------
#
# We can also specify either continuous or categorical values to use for the colors,
# and even map categorical values to specific colors where useful:
#
fig, ax = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 8))
ax[0].set_title("Continuous Values")
normdf.pyroplot.spider(
ax=ax[0],
unity_line=True,
index_order="incompatibility",
cmap="plasma",
alpha=0.1,
color=np.log(normdf["Li"]), # a range of continous values
)
ax[1].set_title("Boolean/Categorical Values")
normdf.pyroplot.spider(
ax=ax[1],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
)
ax[2].set_title("Boolean/Categorical Values with Color Mapping")
normdf.pyroplot.spider(
ax=ax[2],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
color_mappings={ # mapping the boolean values to specific colors
"color": {True: "green", False: "purple"}
},
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# Legend Proxies for Spiderplots
# ------------------------------
#
# While it's relatively straightforward to style spider plots as you wish, for the
# moment can be a bit more involved to create a legend for these styles. Where you're
# creating styles based on a set of categories or labels, a few of pyrolite's utility
# functions might come in handy. Below we'll go through such an example, after creating
# a few labels (here based on a binning of the Cs abundance):
#
labels = pd.cut(
np.log(normdf["Cs"]), bins=4, labels=["Low", "Mid. Low", "Mid High", "High"]
)
pd.unique(labels)
########################################################################################
# Below we'll use :func:`~pyrolite.plot.color.process_color` and
# :func:`~pyrolite.util.plot.legend.proxy_line` to construct a set of legend proxies.
# Note that we need to pass the same configuration to both
# :func:`~pyrolite.plot.pyroplot.spider` and :func:`~pyrolite.plot.color.process_color`
# in order to get the same results, and that the order of labels in the legend
# will depend on which labels appear first in your dataframe or series (and hence the
# ordering of the unique values which are returned).
#
from pyrolite.plot.color import process_color
from pyrolite.util.plot.legend import proxy_line
ax = normdf.pyroplot.spider(
unity_line=True,
index_order="incompatibility",
color=labels, # a categorical set of values
cmap="Paired",
alpha=0.5,
figsize=(11, 4),
)
legend_labels = pd.unique(labels) # process_color uses this behind the scenes
proxy_colors = process_color(color=legend_labels, cmap="Paired", alpha=0.5)["c"]
legend_proxies = [proxy_line(color=c, marker="D") for c in proxy_colors]
ax.legend(legend_proxies, legend_labels)
plt.show()
########################################################################################
# If the specific order of the labels in your legend is important or you only want to
# include some of the legend entries for some reason, you could use a dictionary to
# store the key-value pairs and remap the order of the legend entries manually:
#
proxies = {
label: proxy_line(color=c, marker="D")
for label, c in zip(legend_labels, proxy_colors)
}
ordered_labels = ["High", "Mid High", "Mid. Low", "Low"]
ax.legend([proxies[l] for l in ordered_labels], ordered_labels)
plt.show()
########################################################################################
# Split Configuration
# -------------------
#
# If you have potential conflicts between desired configurations for the lines and
# markers of your plots, you can explictly separate the configuration using the
# :code:`scatter_kw` and :code:`line_kw` keyword arguments:
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10, 4))
ax.set_title("Split Configuration")
normdf.pyroplot.spider(
ax=ax,
unity_line=True,
index_order="incompatibility",
scatter_kw=dict(cmap="magma_r", color=np.log(normdf["Li"])),
line_kw=dict(
color=normdf["Cs"] > 5,
color_mappings={"color": {True: "green", False: "purple"}},
),
alpha=0.2, # common alpha config between lines and markers
s=25, # argument for scatter which won't be passed to lines
)
plt.show()
########################################################################################
# Filled Ranges
# -------------
#
# The spiderplot can be extended to provide visualisations of ranges and density via the
# various modes. We could now plot the range of compositions as a filled range:
#
ax = normdf.pyroplot.spider(
mode="fill",
color="green",
alpha=0.5,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Spider Density Plots
# --------------------
#
# Alternatively, we can plot a conditional density spider plot:
#
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(10, 6))
normdf.pyroplot.spider(
ax=ax[0], color="k", alpha=0.05, unity_line=True, index_order=by_incompatibility
)
normdf.pyroplot.spider(
ax=ax[1],
mode="binkde",
vmin=0.05, # 95th percentile,
resolution=10,
unity_line=True,
index_order="incompatibility",
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# We can now assemble a more complete comparison of some of the conditional density
# modes for spider plots:
#
modes = [
("plot", "plot", [], dict(color="k", alpha=0.01)),
("fill", "fill", [], dict(color="k", alpha=0.5)),
("binkde", "binkde", [], dict(resolution=5)),
(
"binkde",
"binkde contours specified",
[],
dict(contours=[0.95], resolution=5), # 95th percentile contour
),
("histogram", "histogram", [], dict(resolution=5, bins=30)),
]
########################################################################################
down, across = len(modes), 1
fig, ax = plt.subplots(
down, across, sharey=True, sharex=True, figsize=(across * 8, 2 * down)
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
for a, (m, name, args, kwargs) in zip(ax, modes):
|
ax = ax.flat
for mix, (m, name, args, kwargs) in enumerate(modes):
normdf.pyroplot.spider(
mode=m,
ax=ax[mix],
vmin=0.05, # minimum percentile
fontsize=8,
unity_line=True,
index_order="incompatibility",
*args,
**kwargs
)
plt.tight_layout()
########################################################################################
# REE Density Plots
# -----------------
#
# Note that this can also be used for REE-indexed plots, in both configurations. Here
# we first specify a set of common keyword-argument configurations and use them for
# both plots:
#
REE_config = dict(unity_line=True, mode="binkde", vmin=0.05, resolution=10)
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(12, 4))
normdf.pyroplot.REE(ax=ax[0], **REE_config)
normdf.pyroplot.REE(ax=ax[1], index="radii", **REE_config)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# .. seealso:: `Heatscatter Plots <heatscatter.html>`__,
# `Density Diagrams <density.html>`__
| a.annotate( # label the axes rows
"Mode: {}".format(name),
xy=(0.1, 1.05),
xycoords=a.transAxes,
fontsize=8,
ha="left",
va="bottom",
) | conditional_block |
spider.py | """
Spiderplots & Density Spiderplots
================================== | import pandas as pd
# sphinx_gallery_thumbnail_number = 4
########################################################################################
# Here we'll set up an example which uses EMORB as a starting point. Typically we'll
# normalise trace element compositions to a reference composition
# to be able to link the diagram to 'relative enrichement' occuring during geological
# processes, so here we're normalising to a Primitive Mantle composition first.
# We're here taking this normalised composition and adding some noise in log-space to
# generate multiple compositions about this mean (i.e. a compositional distribution).
# For simplicility, this is handled by
# :func:`~pyrolite.util.synthetic.example_spider_data`:
#
from pyrolite.util.synthetic import example_spider_data
normdf = example_spider_data(start="EMORB_SM89", norm_to="PM_PON")
########################################################################################
# .. seealso:: `Normalisation <../geochem/normalization.html>`__
#
########################################################################################
# Basic spider plots are straightforward to produce:
#
import pyrolite.plot
ax = normdf.pyroplot.spider(color="0.5", alpha=0.5, unity_line=True, figsize=(10, 4))
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Index Ordering
# --------------
#
# The default ordering here follows that of the dataframe columns, but we typically
# want to reorder these based on some physical ordering. A :code:`index_order` keyword
# argument can be used to supply a function which will reorder the elements before
# plotting. Here we order the elements by relative incompatibility (using
# :func:`pyrolite.geochem.ind.by_incompatibility` behind the scenes):
from pyrolite.geochem.ind import by_incompatibility
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Similarly, you can also rearrange elements to be in order of atomic number:
#
from pyrolite.geochem.ind import by_number
ax = normdf.pyroplot.spider(
color="k",
alpha=0.1,
unity_line=True,
index_order="number",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Color Mapping
# -------------
#
# We can also specify either continuous or categorical values to use for the colors,
# and even map categorical values to specific colors where useful:
#
fig, ax = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(10, 8))
ax[0].set_title("Continuous Values")
normdf.pyroplot.spider(
ax=ax[0],
unity_line=True,
index_order="incompatibility",
cmap="plasma",
alpha=0.1,
color=np.log(normdf["Li"]), # a range of continous values
)
ax[1].set_title("Boolean/Categorical Values")
normdf.pyroplot.spider(
ax=ax[1],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
)
ax[2].set_title("Boolean/Categorical Values with Color Mapping")
normdf.pyroplot.spider(
ax=ax[2],
alpha=0.1,
unity_line=True,
index_order="incompatibility",
color=normdf["Cs"] > 3.5, # a boolean/categorical set of values
color_mappings={ # mapping the boolean values to specific colors
"color": {True: "green", False: "purple"}
},
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# Legend Proxies for Spiderplots
# ------------------------------
#
# While it's relatively straightforward to style spider plots as you wish, for the
# moment can be a bit more involved to create a legend for these styles. Where you're
# creating styles based on a set of categories or labels, a few of pyrolite's utility
# functions might come in handy. Below we'll go through such an example, after creating
# a few labels (here based on a binning of the Cs abundance):
#
labels = pd.cut(
np.log(normdf["Cs"]), bins=4, labels=["Low", "Mid. Low", "Mid High", "High"]
)
pd.unique(labels)
########################################################################################
# Below we'll use :func:`~pyrolite.plot.color.process_color` and
# :func:`~pyrolite.util.plot.legend.proxy_line` to construct a set of legend proxies.
# Note that we need to pass the same configuration to both
# :func:`~pyrolite.plot.pyroplot.spider` and :func:`~pyrolite.plot.color.process_color`
# in order to get the same results, and that the order of labels in the legend
# will depend on which labels appear first in your dataframe or series (and hence the
# ordering of the unique values which are returned).
#
from pyrolite.plot.color import process_color
from pyrolite.util.plot.legend import proxy_line
ax = normdf.pyroplot.spider(
unity_line=True,
index_order="incompatibility",
color=labels, # a categorical set of values
cmap="Paired",
alpha=0.5,
figsize=(11, 4),
)
legend_labels = pd.unique(labels) # process_color uses this behind the scenes
proxy_colors = process_color(color=legend_labels, cmap="Paired", alpha=0.5)["c"]
legend_proxies = [proxy_line(color=c, marker="D") for c in proxy_colors]
ax.legend(legend_proxies, legend_labels)
plt.show()
########################################################################################
# If the specific order of the labels in your legend is important or you only want to
# include some of the legend entries for some reason, you could use a dictionary to
# store the key-value pairs and remap the order of the legend entries manually:
#
proxies = {
label: proxy_line(color=c, marker="D")
for label, c in zip(legend_labels, proxy_colors)
}
ordered_labels = ["High", "Mid High", "Mid. Low", "Low"]
ax.legend([proxies[l] for l in ordered_labels], ordered_labels)
plt.show()
########################################################################################
# Split Configuration
# -------------------
#
# If you have potential conflicts between desired configurations for the lines and
# markers of your plots, you can explictly separate the configuration using the
# :code:`scatter_kw` and :code:`line_kw` keyword arguments:
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10, 4))
ax.set_title("Split Configuration")
normdf.pyroplot.spider(
ax=ax,
unity_line=True,
index_order="incompatibility",
scatter_kw=dict(cmap="magma_r", color=np.log(normdf["Li"])),
line_kw=dict(
color=normdf["Cs"] > 5,
color_mappings={"color": {True: "green", False: "purple"}},
),
alpha=0.2, # common alpha config between lines and markers
s=25, # argument for scatter which won't be passed to lines
)
plt.show()
########################################################################################
# Filled Ranges
# -------------
#
# The spiderplot can be extended to provide visualisations of ranges and density via the
# various modes. We could now plot the range of compositions as a filled range:
#
ax = normdf.pyroplot.spider(
mode="fill",
color="green",
alpha=0.5,
unity_line=True,
index_order="incompatibility",
figsize=(10, 4),
)
ax.set_ylabel("X / $X_{Primitive Mantle}$")
plt.show()
########################################################################################
# Spider Density Plots
# --------------------
#
# Alternatively, we can plot a conditional density spider plot:
#
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(10, 6))
normdf.pyroplot.spider(
ax=ax[0], color="k", alpha=0.05, unity_line=True, index_order=by_incompatibility
)
normdf.pyroplot.spider(
ax=ax[1],
mode="binkde",
vmin=0.05, # 95th percentile,
resolution=10,
unity_line=True,
index_order="incompatibility",
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# We can now assemble a more complete comparison of some of the conditional density
# modes for spider plots:
#
modes = [
("plot", "plot", [], dict(color="k", alpha=0.01)),
("fill", "fill", [], dict(color="k", alpha=0.5)),
("binkde", "binkde", [], dict(resolution=5)),
(
"binkde",
"binkde contours specified",
[],
dict(contours=[0.95], resolution=5), # 95th percentile contour
),
("histogram", "histogram", [], dict(resolution=5, bins=30)),
]
########################################################################################
down, across = len(modes), 1
fig, ax = plt.subplots(
down, across, sharey=True, sharex=True, figsize=(across * 8, 2 * down)
)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
for a, (m, name, args, kwargs) in zip(ax, modes):
a.annotate( # label the axes rows
"Mode: {}".format(name),
xy=(0.1, 1.05),
xycoords=a.transAxes,
fontsize=8,
ha="left",
va="bottom",
)
ax = ax.flat
for mix, (m, name, args, kwargs) in enumerate(modes):
normdf.pyroplot.spider(
mode=m,
ax=ax[mix],
vmin=0.05, # minimum percentile
fontsize=8,
unity_line=True,
index_order="incompatibility",
*args,
**kwargs
)
plt.tight_layout()
########################################################################################
# REE Density Plots
# -----------------
#
# Note that this can also be used for REE-indexed plots, in both configurations. Here
# we first specify a set of common keyword-argument configurations and use them for
# both plots:
#
REE_config = dict(unity_line=True, mode="binkde", vmin=0.05, resolution=10)
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(12, 4))
normdf.pyroplot.REE(ax=ax[0], **REE_config)
normdf.pyroplot.REE(ax=ax[1], index="radii", **REE_config)
[a.set_ylabel("X / $X_{Primitive Mantle}$") for a in ax]
plt.show()
########################################################################################
# .. seealso:: `Heatscatter Plots <heatscatter.html>`__,
# `Density Diagrams <density.html>`__ | """
import matplotlib.pyplot as plt
import numpy as np | random_line_split |
types.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StorageClass describes the parameters for a class of storage for
// which PersistentVolumes can be dynamically provisioned.
//
// StorageClasses are non-namespaced; the name of the storage class
// according to etcd is in ObjectMeta.Name.
type StorageClass struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// provisioner indicates the type of the provisioner.
Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"`
// parameters holds the parameters for the provisioner that should
// create volumes of this storage class.
// +optional
Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
// reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class.
// Defaults to Delete.
// +optional
ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"`
// mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class.
// e.g. ["ro", "soft"]. Not validated -
// mount of the PVs will simply fail if one is invalid.
// +optional
MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"`
// allowVolumeExpansion shows whether the storage class allows volume expansion.
// +optional
AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"`
// volumeBindingMode indicates how PersistentVolumeClaims should be
// provisioned and bound. When unset, VolumeBindingImmediate is used.
// This field is only honored by servers that enable the VolumeScheduling feature.
// +optional
VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"`
// allowedTopologies restrict the node topologies where volumes can be dynamically provisioned.
// Each volume plugin defines its own supported topology specifications.
// An empty TopologySelectorTerm list means there is no topology restriction.
// This field is only honored by servers that enable the VolumeScheduling feature.
// +optional
// +listType=atomic
AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
}
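// Example (illustrative only, not part of the API definition): a StorageClass
// manifest wiring the fields above together. The provisioner name and the
// parameters below are placeholders for whatever CSI driver is actually deployed.
//
//	apiVersion: storage.k8s.io/v1
//	kind: StorageClass
//	metadata:
//	  name: fast
//	provisioner: example.csi.vendor.com
//	parameters:
//	  type: ssd
//	reclaimPolicy: Delete
//	allowVolumeExpansion: true
//	volumeBindingMode: WaitForFirstConsumer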
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StorageClassList is a collection of storage classes.
type StorageClassList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of StorageClasses
Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// VolumeBindingMode indicates how PersistentVolumeClaims should be bound.
// +enum
type VolumeBindingMode string
const (
// VolumeBindingImmediate indicates that PersistentVolumeClaims should be
// immediately provisioned and bound. This is the default mode.
VolumeBindingImmediate VolumeBindingMode = "Immediate"
// VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims
// should not be provisioned and bound until the first Pod is created that
// references the PersistentVolumeClaim. The volume provisioning and
// binding will occur during Pod scheduling.
VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VolumeAttachment captures the intent to attach or detach the specified volume
// to/from the specified node.
//
// VolumeAttachment objects are non-namespaced.
type VolumeAttachment struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec represents specification of the desired attach/detach volume behavior.
// Populated by the Kubernetes system.
Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// status represents status of the VolumeAttachment request.
// Populated by the entity completing the attach or detach
// operation, i.e. the external-attacher.
// +optional
Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VolumeAttachmentList is a collection of VolumeAttachment objects.
type VolumeAttachmentList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of VolumeAttachments
Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
type VolumeAttachmentSpec struct {
// attacher indicates the name of the volume driver that MUST handle this
// request. This is the name returned by GetPluginName().
Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"`
// source represents the volume that should be attached.
Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"`
// nodeName represents the node that the volume should be attached to.
NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"`
}
// VolumeAttachmentSource represents a volume that should be attached.
// Right now only PersistentVolumes can be attached via external attacher,
// in future we may allow also inline volumes in pods.
// Exactly one member can be set.
type VolumeAttachmentSource struct {
// persistentVolumeName represents the name of the persistent volume to attach.
// +optional
PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"`
// inlineVolumeSpec contains all the information necessary to attach
// a persistent volume defined by a pod's inline VolumeSource. This field
// is populated only for the CSIMigration feature. It contains
// translated fields from a pod's inline VolumeSource to a
// PersistentVolumeSpec. This field is beta-level and is only
// honored by servers that enabled the CSIMigration feature.
// +optional
InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"`
}
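// Example (illustrative only): a VolumeAttachment object as the attach/detach
// controller might create it; the driver, PV and node names are placeholders.
//
//	apiVersion: storage.k8s.io/v1
//	kind: VolumeAttachment
//	metadata:
//	  name: csi-abc123
//	spec:
//	  attacher: example.csi.vendor.com
//	  source:
//	    persistentVolumeName: pv-0001
//	  nodeName: node-1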
// VolumeAttachmentStatus is the status of a VolumeAttachment request.
type VolumeAttachmentStatus struct {
// attached indicates the volume is successfully attached.
// This field must only be set by the entity completing the attach
// operation, i.e. the external-attacher.
Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"`
// attachmentMetadata is populated with any
// information returned by the attach operation, upon successful attach, that must be passed
// into subsequent WaitForAttach or Mount calls.
// This field must only be set by the entity completing the attach
// operation, i.e. the external-attacher.
// +optional
AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"`
// attachError represents the last error encountered during attach operation, if any.
// This field must only be set by the entity completing the attach
// operation, i.e. the external-attacher.
// +optional
AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"`
// detachError represents the last error encountered during detach operation, if any.
// This field must only be set by the entity completing the detach
// operation, i.e. the external-attacher.
// +optional
DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"`
}
// VolumeError captures an error encountered during a volume operation.
type VolumeError struct {
// time represents the time the error was encountered.
// +optional
Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"`
// message represents the error encountered during Attach or Detach operation.
// This string may be logged, so it should not contain sensitive
// information.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSIDriver captures information about a Container Storage Interface (CSI)
// volume driver deployed on the cluster.
// Kubernetes attach detach controller uses this object to determine whether attach is required.
// Kubelet uses this object to determine whether pod information needs to be passed on mount.
// CSIDriver objects are non-namespaced.
type CSIDriver struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata.
// metadata.Name indicates the name of the CSI driver that this object
// refers to; it MUST be the same name returned by the CSI GetPluginName()
// call for that driver.
// The driver name must be 63 characters or less, beginning and ending with
// an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
// alphanumerics between.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec represents the specification of the CSI Driver.
Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSIDriverList is a collection of CSIDriver objects.
type CSIDriverList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of CSIDriver
Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// CSIDriverSpec is the specification of a CSIDriver.
type CSIDriverSpec struct {
// attachRequired indicates this CSI volume driver requires an attach
// operation (because it implements the CSI ControllerPublishVolume()
// method), and that the Kubernetes attach detach controller should call
// the attach volume interface which checks the volumeattachment status
// and waits until the volume is attached before proceeding to mounting.
// The CSI external-attacher coordinates with CSI volume driver and updates
// the volumeattachment status when the attach operation is complete.
// If the CSIDriverRegistry feature gate is enabled and the value is
// specified to false, the attach operation will be skipped.
// Otherwise the attach operation will be called.
//
// This field is immutable.
//
// +optional
AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"`
// podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.)
// during mount operations, if set to true.
// If set to false, pod information will not be passed on mount.
// Default is false.
//
// The CSI driver specifies podInfoOnMount as part of driver deployment.
// If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
// The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
//
// The following VolumeContext will be passed if podInfoOnMount is set to true.
// This list might grow, but the prefix will be used.
// "csi.storage.k8s.io/pod.name": pod.Name
// "csi.storage.k8s.io/pod.namespace": pod.Namespace
// "csi.storage.k8s.io/pod.uid": string(pod.UID)
// "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume
// defined by a CSIVolumeSource, otherwise "false"
//
// "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only
// required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode.
// Other drivers can leave pod info disabled and/or ignore this field.
// As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when
// deployed on such a cluster and the deployment determines which mode that is, for example
// via a command line parameter of the driver.
//
// This field is immutable.
//
// +optional
PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"`
// volumeLifecycleModes defines what kind of volumes this CSI volume driver supports.
// The default if the list is empty is "Persistent", which is the usage defined by the
// CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.
//
// The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec
// with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod.
// A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.
//
// For more information about implementing this mode, see
// https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
// A driver can support one or more of these modes and more modes may be added in the future.
//
// This field is beta.
// This field is immutable.
//
// +optional
// +listType=set
VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"`
// storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage
// capacity that the driver deployment will report by creating
// CSIStorageCapacity objects with capacity information, if set to true.
//
// The check can be enabled immediately when deploying a driver.
// In that case, provisioning new volumes with late binding
// will pause until the driver deployment has published
// some suitable CSIStorageCapacity object.
//
// Alternatively, the driver can be deployed with the field
// unset or false and it can be flipped later when storage
// capacity information has been published.
//
// This field was immutable in Kubernetes <= 1.22 and now is mutable.
//
// +optional
// +featureGate=CSIStorageCapacity
StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"`
// fsGroupPolicy defines if the underlying volume supports changing ownership and
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
//
// This field is immutable.
//
// Defaults to ReadWriteOnceWithFSType, which will examine each volume
// to determine if Kubernetes should modify ownership and permissions of the volume.
// With the default policy the defined fsGroup will only be applied
// if a fstype is defined and the volume's access mode contains ReadWriteOnce.
//
// +optional
FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"`
// tokenRequests indicates the CSI driver needs pods' service account
// tokens it is mounting volume for to do necessary authentication. Kubelet
// will pass the tokens in VolumeContext in the CSI NodePublishVolume calls.
// The CSI driver should parse and validate the following VolumeContext:
// "csi.storage.k8s.io/serviceAccount.tokens": {
// "<audience>": {
// "token": <token>,
// "expirationTimestamp": <expiration timestamp in RFC3339>,
// },
// ...
// }
//
// Note: Audience in each TokenRequest should be different and at
// most one token is empty string. To receive a new token after expiry,
// RequiresRepublish can be used to trigger NodePublishVolume periodically.
//
// +optional
// +listType=atomic
TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"`
// requiresRepublish indicates the CSI driver wants `NodePublishVolume`
// being periodically called to reflect any possible change in the mounted
// volume. This field defaults to false.
//
// Note: After a successful initial NodePublishVolume call, subsequent calls
// to NodePublishVolume should only update the contents of the volume. New
// mount points will not be seen by a running container.
//
// +optional
RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"`
// seLinuxMount specifies if the CSI driver supports "-o context"
// mount option.
//
// When "true", the CSI driver must ensure that all volumes provided by this CSI
// driver can be mounted separately with different `-o context` options. This is
// typical for storage backends that provide volumes as filesystems on block
// devices or as independent shared volumes.
// Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount
// option when mounting a ReadWriteOncePod volume used in Pod that has
// explicitly set SELinux context. In the future, it may be expanded to other
// volume AccessModes. In any case, Kubernetes will ensure that the volume is
// mounted only with a single SELinux context.
//
// When "false", Kubernetes won't pass any special SELinux mount options to the driver.
// This is typical for volumes that represent subdirectories of a bigger shared filesystem.
//
// Default is "false".
//
// +featureGate=SELinuxMountReadWriteOncePod
// +optional
SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"`
}
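// Example (illustrative only): a CSIDriver object exercising several of the
// CSIDriverSpec fields above. The driver name is a placeholder.
//
//	apiVersion: storage.k8s.io/v1
//	kind: CSIDriver
//	metadata:
//	  name: example.csi.vendor.com
//	spec:
//	  attachRequired: true
//	  podInfoOnMount: true
//	  volumeLifecycleModes:
//	    - Persistent
//	    - Ephemeral
//	  storageCapacity: true
//	  fsGroupPolicy: File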
// FSGroupPolicy specifies if a CSI Driver supports modifying
// volume ownership and permissions of the volume to be mounted.
// More modes may be added in the future.
type FSGroupPolicy string
const (
// ReadWriteOnceWithFSTypeFSGroupPolicy indicates that each volume will be examined
// to determine if the volume ownership and permissions
// should be modified. If a fstype is defined and the volume's access mode
// contains ReadWriteOnce, then the defined fsGroup will be applied.
// This mode should be defined if it's expected that the
// fsGroup may need to be modified depending on the pod's SecurityPolicy.
// This is the default behavior if no other FSGroupPolicy is defined.
ReadWriteOnceWithFSTypeFSGroupPolicy FSGroupPolicy = "ReadWriteOnceWithFSType"
// FileFSGroupPolicy indicates that CSI driver supports volume ownership
// and permission change via fsGroup, and Kubernetes will change the permissions
// and ownership of every file in the volume to match the user requested fsGroup in
// the pod's SecurityPolicy regardless of fstype or access mode.
// Use this mode if Kubernetes should modify the permissions and ownership
// of the volume.
FileFSGroupPolicy FSGroupPolicy = "File"
// NoneFSGroupPolicy indicates that volumes will be mounted without performing
// any ownership or permission modifications, as the CSIDriver does not support
// these operations.
// This mode should be selected if the CSIDriver does not support fsGroup modifications,
// for example when Kubernetes cannot change ownership and permissions on a volume due
// to root-squash settings on a NFS volume.
NoneFSGroupPolicy FSGroupPolicy = "None"
)
// VolumeLifecycleMode is an enumeration of possible usage modes for a volume
// provided by a CSI driver. More modes may be added in the future.
type VolumeLifecycleMode string
// TokenRequest contains parameters of a service account token.
type TokenRequest struct {
// audience is the intended audience of the token in "TokenRequestSpec".
// It will default to the audiences of kube apiserver.
Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"`
// expirationSeconds is the duration of validity of the token in "TokenRequestSpec".
// It has the same default value of "ExpirationSeconds" in "TokenRequestSpec".
//
// +optional
ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
}
const (
// VolumeLifecyclePersistent explicitly confirms that the driver implements
// the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not
// set. Such volumes are managed in Kubernetes via the persistent volume
// claim mechanism and have a lifecycle that is independent of the pods which
// use them.
VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent"
// VolumeLifecycleEphemeral indicates that the driver can be used for
// ephemeral inline volumes. Such volumes are specified inside the pod
// spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have
// a lifecycle that is tied to the lifecycle of the pod. For example, such
// a volume might contain data that gets created specifically for that pod,
// like secrets.
// But how the volume actually gets created and managed is entirely up to
// the driver. It might also use reference counting to share the same volume
// instance among different pods if the CSIVolumeSource of those pods is
// identical.
VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSINode holds information about all CSI drivers installed on a node.
// CSI drivers do not need to create the CSINode object directly. As long as
// they use the node-driver-registrar sidecar container, the kubelet will
// automatically populate the CSINode object for the CSI driver as part of
// kubelet plugin registration.
// CSINode has the same name as a node. If the object is missing, it means either
// there are no CSI Drivers available on the node, or the Kubelet version is low
// enough that it doesn't create this object.
// CSINode has an OwnerReference that points to the corresponding node object.
type CSINode struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// metadata.name must be the Kubernetes node name.
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec is the specification of CSINode
Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
}
// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
type CSINodeSpec struct {
// drivers is a list of information of all CSI Drivers existing on a node.
// If all drivers in the list are uninstalled, this can become empty.
// +patchMergeKey=name
// +patchStrategy=merge
Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"`
}
// CSINodeDriver holds information about the specification of one CSI driver installed on a node
type CSINodeDriver struct {
// name represents the name of the CSI driver that this object refers to.
// This MUST be the same name returned by the CSI GetPluginName() call for
// that driver.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// nodeID of the node from the driver point of view.
// This field enables Kubernetes to communicate with storage systems that do
// not share the same nomenclature for nodes. For example, Kubernetes may
// refer to a given node as "node1", but the storage system may refer to
// the same node as "nodeA". When Kubernetes issues a command to the storage
// system to attach a volume to a specific node, it can use this field to
// refer to the node name using the ID that the storage system will
// understand, e.g. "nodeA" instead of "node1". This field is required.
NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"`
// topologyKeys is the list of keys supported by the driver.
// When a driver is initialized on a cluster, it provides a set of topology
// keys that it understands (e.g. "company.com/zone", "company.com/region").
// When a driver is initialized on a node, it provides the same topology keys
// along with values. Kubelet will expose these topology keys as labels
// on its own node object.
// When Kubernetes does topology aware provisioning, it can use this list to
// determine which labels it should retrieve from the node object and pass
// back to the driver.
// It is possible for different nodes to use different topology keys.
// This can be empty if driver does not support topology.
// +optional
TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"`
// allocatable represents the volume resources of a node that are available for scheduling.
// This field is beta.
// +optional
Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"`
}
// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResources struct {
// count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node.
// A volume that is both attached and mounted on a node is considered to be used once, not twice.
// The same rule applies for a unique volume that is shared among multiple pods on the same node.
// If this field is not specified, then the supported number of volumes on this node is unbounded.
// +optional
Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"`
}
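// Example (illustrative only): a CSINode object as the kubelet might populate it
// for one registered driver; the driver name and topology key are placeholders.
//
//	apiVersion: storage.k8s.io/v1
//	kind: CSINode
//	metadata:
//	  name: node-1
//	spec:
//	  drivers:
//	    - name: example.csi.vendor.com
//	      nodeID: nodeA
//	      topologyKeys:
//	        - topology.kubernetes.io/zone
//	      allocatable:
//	        count: 32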
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSINodeList is a collection of CSINode objects.
type CSINodeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of CSINode
Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSIStorageCapacity stores the result of one CSI GetCapacity call.
// For a given StorageClass, this describes the available capacity in a
// particular topology segment. This can be used when considering where to
// instantiate new PersistentVolumes.
//
// For example this can express things like:
// - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1"
// - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
//
// The following three cases all imply that no capacity is available for
// a certain combination:
// - no object exists with suitable topology and storage class name
// - such an object exists, but the capacity is unset
// - such an object exists, but the capacity is zero
//
// The producer of these objects can decide which approach is more suitable.
//
// They are consumed by the kube-scheduler when a CSI driver opts into
// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler
// compares the MaximumVolumeSize against the requested size of pending volumes
// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back
// to a comparison against the less precise Capacity. If that is also unset,
// the scheduler assumes that capacity is insufficient and tries some other
// node.
type CSIStorageCapacity struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters).
// To ensure that there are no conflicts with other CSI drivers on the cluster,
// the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name
// which ends with the unique CSI driver name.
//
// Objects are namespaced.
//
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// nodeTopology defines which nodes have access to the storage
// for which capacity was reported. If not set, the storage is
// not accessible from any node in the cluster. If empty, the
// storage is accessible from all nodes. This field is
// immutable.
//
// +optional
NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"`
// storageClassName represents the name of the StorageClass that the reported capacity applies to.
// It must meet the same requirements as the name of a StorageClass
// object (non-empty, DNS subdomain). If that object no longer exists,
// the CSIStorageCapacity object is obsolete and should be removed by its
// creator.
// This field is immutable.
StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"`
// capacity is the value reported by the CSI driver in its GetCapacityResponse
// for a GetCapacityRequest with topology and parameters that match the
// previous fields.
//
// The semantic is currently (CSI spec 1.2) defined as:
// The available capacity, in bytes, of the storage that can be used
// to provision volumes. If not set, that information is currently
// unavailable.
//
// +optional
Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"`
// maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
// for a GetCapacityRequest with topology and parameters that match the
// previous fields.
//
// This is defined since CSI spec 1.4.0 as the largest size
// that may be used in a
// CreateVolumeRequest.capacity_range.required_bytes field to
// create a volume with the same parameters as those in
// GetCapacityRequest. The corresponding value in the Kubernetes
// API is ResourceRequirements.Requests in a volume claim.
//
// +optional
MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty" protobuf:"bytes,5,opt,name=maximumVolumeSize"`
}
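// Example (illustrative only): a CSIStorageCapacity object as a CSI driver
// deployment might publish it; the names, namespace and topology label below
// are placeholders.
//
//	apiVersion: storage.k8s.io/v1
//	kind: CSIStorageCapacity
//	metadata:
//	  name: csisc-ab96d356
//	  namespace: kube-system
//	nodeTopology:
//	  matchLabels:
//	    topology.kubernetes.io/zone: us-east-1a
//	storageClassName: fast
//	capacity: 100Gi
//	maximumVolumeSize: 50Gi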
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CSIStorageCapacityList is a collection of CSIStorageCapacity objects.
type CSIStorageCapacityList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of CSIStorageCapacity objects.
// +listType=map
// +listMapKey=name
Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"`
}
DatePicker.ts
// NG2
import { animate, state, style, transition, trigger } from '@angular/animations';
import { ChangeDetectorRef, Component, ElementRef, EventEmitter, forwardRef, HostBinding, Input, OnInit, Output } from '@angular/core';
import { ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms';
import { DomSanitizer } from '@angular/platform-browser';
// Vendor
import { isDate, isValid, subDays } from 'date-fns';
// `Day` (the 0-6 weekday union used by `weekStart` below) is assumed to be exported by date-fns' typings.
import type { Day } from 'date-fns';
// APP
import { NovoLabelService } from 'novo-elements/services';
import { BooleanInput, DataTableRangeModel, DatePickerSelectModes, DateUtil, Helpers, modelTypes, RangeModel, rangeSelectModes } from 'novo-elements/utils';
// Value accessor for the component (supports ngModel)
const DATE_PICKER_VALUE_ACCESSOR = {
provide: NG_VALUE_ACCESSOR,
useExisting: forwardRef(() => NovoDatePickerElement),
multi: true,
};
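/*
 * Minimal usage sketch (illustrative; the host component fields `selectedDate`,
 * `dateRange` and `onRange` are hypothetical, and the library module plus Angular
 * FormsModule are assumed to be imported):
 *
 *   <novo-date-picker [(ngModel)]="selectedDate"></novo-date-picker>
 *   <novo-date-picker mode="range" [(ngModel)]="dateRange" (onSelect)="onRange($event)"></novo-date-picker>
 *
 * In "range" mode the model written back through ngModel has the shape
 * { startDate: Date, endDate: Date }, matching the RangeModel handling below.
 */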
@Component({
selector: 'novo-date-picker',
providers: [DATE_PICKER_VALUE_ACCESSOR],
animations: [
trigger('startDateTextState', [
state(
'startDate',
style({
opacity: '1.0',
}),
),
state(
'endDate',
style({
opacity: '0.6',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('endDateTextState', [
state(
'startDate',
style({
opacity: '0.6',
}),
),
state(
'endDate',
style({
opacity: '1.0',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('indicatorState', [
state(
'startDate',
style({
transform: 'translateX(0%)',
}),
),
state(
'endDate',
style({
transform: 'translateX(100%)',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
],
template: `
<div class="date-picker-container">
<div class="date-range-tabs" *ngIf="range" [class.week-select-mode]="weekRangeSelect">
<span
class="range-tab"
(click)="toggleRangeSelect('startDate')"
[@startDateTextState]="rangeSelectMode"
data-automation-id="calendar-start-date"
>{{ startDateLabel }}</span
>
<span
class="range-tab"
(click)="toggleRangeSelect('endDate')"
[@endDateTextState]="rangeSelectMode"
data-automation-id="calendar-end-date"
>{{ endDateLabel }}</span
>
<i class="indicator" [@indicatorState]="rangeSelectMode"></i>
</div>
<novo-calendar
[activeDate]="activeDate"
[(selected)]="selection"
(selectedChange)="updateSelection($event)"
[mode]="mode"
[numberOfMonths]="numberOfMonths"
[weekStartsOn]="weekStart"
[disabledDateMessage]="disabledDateMessage"
[minDate]="start"
[maxDate]="end"
></novo-calendar>
<div class="calendar-footer" [hidden]="hideFooter">
<novo-button (click)="setToday()" class="today" size="small" data-automation-id="calendar-today">{{ labels.today }}</novo-button>
</div>
</div>
`,
styleUrls: ['./DatePicker.scss'],
})
export class NovoDatePickerElement implements ControlValueAccessor, OnInit {
/**
* The minimum year to allow selected in year select view
**/
@Input()
minYear: string | number;
/**
* The maximum year to allow selected in year select view
**/
@Input()
maxYear: string | number;
/**
* The minimum date that can be selected.
**/
@Input()
start: Date;
/**
* The maximum date that can be selected.
**/
@Input()
end: Date;
/**
* **Deprecated** Whether the date-picker is used outside of an overlay.
**/
@Input()
@BooleanInput()
inline: boolean;
/**
* Day of the week the calendar should display first, Sunday=0...Saturday=6
**/
@Input()
weekStart: Day = 0;
/**
* Certain dates that are already selected.
**/
@Input()
preselected: Date[] = [];
/**
* Whether the days for the previous and next month should be hidden.
**/
@Input()
@BooleanInput()
@HostBinding('class.hide-overflow-days')
public hideOverflowDays: boolean = false;
/**
* Whether the footer which contains `today` button should be hidden.
**/
@Input()
@BooleanInput()
public hideFooter: boolean = false;
@Input()
disabledDateMessage: string;
// Select callback for output
@Output()
onSelect: EventEmitter<any> = new EventEmitter(false);
_mode: DatePickerSelectModes = 'single';
_range: boolean;
_weekRangeSelect: boolean;
_numberOfMonths: number[] = [0];
/**
* Number of months to display at once.
* @default 1
**/
@Input()
get numberOfMonths(): number {
return this._numberOfMonths.length;
}
set numberOfMonths(value) {
this._numberOfMonths = Array.from(Array(Number(value)).keys());
}
/**
* How the date selection should work.
* @default single
**/
@Input()
get mode(): DatePickerSelectModes {
return this._mode;
}
set mode(value) {
if (this._mode !== value) {
this._mode = value;
}
}
/**
* **deprecated** please use `mode="range"`.
**/
@Input()
get range(): boolean {
return ['range', 'week'].includes(this.mode) || this._range;
}
set range(value) {
console.warn(`'range' property is deprecated, please use 'mode="range"'.`);
if (this._range !== value) {
this._range = value;
this.mode = 'range';
}
}
/**
* **deprecated** please use `mode="week"`.
**/
@Input()
get weekRangeSelect(): boolean {
return this._mode === 'week' || this._weekRangeSelect;
}
set weekRangeSelect(value) {
console.warn(`'weekRangeSelect' property is deprecated, please use 'mode="week"'.`);
if (this._weekRangeSelect !== value) {
this._weekRangeSelect = value;
this.mode = 'week';
}
}
// @HostBinding('style.width')
// get hb_width() {
// return this._sanitizer.bypassSecurityTrustStyle(`${this.numberOfMonths * 228}px`);
// }
model: modelTypes;
activeDate: Date;
_selection: Date[] = [];
preview: Date[] = [];
startDateLabel: string;
endDateLabel: string;
rangeSelectMode: rangeSelectModes = 'startDate';
_onChange: Function = () => {};
_onTouched: Function = () => {};
get selection(): Date[] {
return this._selection;
}
set selection(value) {
this._selection = value ? value.filter(isDate).map((d) => DateUtil.startOfDay(d)) : [];
}
constructor(
public labels: NovoLabelService,
private element: ElementRef,
private cdr: ChangeDetectorRef,
private _sanitizer: DomSanitizer,
) {}
ngOnInit() {
// Determine the year array
const now = new Date();
// Set labels
if (this.model) {
this.modelToSelection(this.model);
}
if (this.selection && this.selection.length) {
this.updateView(this.selection[0]);
}
}
updateView(date) {
const value: any = date ? new Date(date) : new Date();
this.activeDate = new Date(value);
}
updateSelection(selected: Date[], fireEvents = true) {
// Helpers.swallowEvent(event);
this.selection = selected;
this.startDateLabel = this.labels.formatDateWithFormat(this.selection[0], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
this.endDateLabel = this.labels.formatDateWithFormat(this.selection[1], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
if (fireEvents) {
switch (this.mode) {
case 'multiple':
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection);
this.model = this.selection;
break;
case 'range':
case 'week':
if (this.selection.filter(Boolean).length === 2) {
this.fireRangeSelect();
// Also, update the ngModel
const model = {
startDate: this.selection[0],
endDate: this.selection[1],
};
this._onChange(model);
this.model = model;
}
break;
case 'single':
default:
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection[0]);
this.model = this.selection[0];
break;
}
}
this.cdr.markForCheck();
}
eventData(date: Date) {
return {
year: date.getFullYear(),
month: this.labels.formatDateWithFormat(date, { month: 'long' }),
day: this.labels.formatDateWithFormat(date, { weekday: 'long' }),
date,
};
}
fireSelect() {
if (this.mode === 'multiple') {
this.onSelect.next(this.selection);
} else {
this.onSelect.next(this.eventData(this.selection[0]));
}
}
fireRangeSelect() {
// Make sure the start date is before the end date
if (this.selection.filter(Boolean).length === 2) {
const [start, end] = this.selection;
this.onSelect.next({
startDate: this.eventData(start),
endDate: this.eventData(end),
});
}
}
setToday() {
const tmp = new Date();
this.updateView(tmp);
this.updateSelection(Array.of(tmp));
}
toggleRangeSelect(range: rangeSelectModes): void {
this.rangeSelectMode = range;
if (range === 'startDate' && this.selection.length) {
this.updateView(this.selection[0]);
}
if (range === 'endDate' && this.selection.length === 2) {
this.updateView(this.selection[1]);
}
}
modelToSelection(model: modelTypes) {
switch (this.mode) {
case 'multiple':
this.selection = model as Date[];
break;
case 'range':
case 'week':
this.setRangeSelection();
break;
case 'single':
default:
this.selection = [model as Date];
break;
}
}
// ValueAccessor Functions
writeValue(model: modelTypes): void {
this.model = model;
if (this.mode === 'multiple') {
this.selection = this.model as Date[];
}
if (this.mode === 'range') {
this.setRangeSelection();
}
if (Helpers.isDate(model)) {
this.updateView(model);
this.modelToSelection(model);
} else if (Helpers.isString(model)) {
const date = DateUtil.parse(model as any);
if (isValid(date)) {
this.updateView(date);
this.modelToSelection(date);
}
}
}
setRangeSelection() {
if (this.model?.hasOwnProperty('startDate')) {
// coming from standalone date picker
const range = this.model as RangeModel;
this.selection = [range.startDate, range.endDate].filter(Boolean);
} else if (this.model?.hasOwnProperty('min')) {
// coming from data-table filter where model end date is the beginning of the next day
const range = this.model as DataTableRangeModel;
this.selection = [range.min, subDays(range.max, 1)].filter(Boolean);
}
}
registerOnChange(fn: Function): void {
this._onChange = fn;
}
registerOnTouched(fn: Function): void {
this._onTouched = fn;
}
}
DatePicker.ts | // NG2
import { animate, state, style, transition, trigger } from '@angular/animations';
import { ChangeDetectorRef, Component, ElementRef, EventEmitter, forwardRef, HostBinding, Input, OnInit, Output } from '@angular/core';
import { ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms';
import { DomSanitizer } from '@angular/platform-browser';
// Vendor
import { isDate, isValid, subDays } from 'date-fns';
// APP
import { NovoLabelService } from 'novo-elements/services';
import { BooleanInput, DataTableRangeModel, DatePickerSelectModes, DateUtil, Helpers, modelTypes, RangeModel, rangeSelectModes } from 'novo-elements/utils';
// Value accessor for the component (supports ngModel)
const DATE_PICKER_VALUE_ACCESSOR = {
provide: NG_VALUE_ACCESSOR,
useExisting: forwardRef(() => NovoDatePickerElement),
multi: true,
};
@Component({
selector: 'novo-date-picker',
providers: [DATE_PICKER_VALUE_ACCESSOR],
animations: [
trigger('startDateTextState', [
state(
'startDate',
style({
opacity: '1.0',
}),
),
state(
'endDate',
style({
opacity: '0.6',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('endDateTextState', [
state(
'startDate',
style({
opacity: '0.6',
}),
),
state(
'endDate',
style({
opacity: '1.0',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('indicatorState', [
state(
'startDate',
style({
transform: 'translateX(0%)',
}),
),
state(
'endDate',
style({
transform: 'translateX(100%)',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
],
template: `
<div class="date-picker-container">
<div class="date-range-tabs" *ngIf="range" [class.week-select-mode]="weekRangeSelect">
<span
class="range-tab"
(click)="toggleRangeSelect('startDate')"
[@startDateTextState]="rangeSelectMode"
data-automation-id="calendar-start-date"
>{{ startDateLabel }}</span
>
<span
class="range-tab"
(click)="toggleRangeSelect('endDate')"
[@endDateTextState]="rangeSelectMode"
data-automation-id="calendar-end-date"
>{{ endDateLabel }}</span
>
<i class="indicator" [@indicatorState]="rangeSelectMode"></i>
</div>
<novo-calendar
[activeDate]="activeDate"
[(selected)]="selection"
(selectedChange)="updateSelection($event)"
[mode]="mode"
[numberOfMonths]="numberOfMonths"
[weekStartsOn]="weekStart"
[disabledDateMessage]="disabledDateMessage"
[minDate]="start"
[maxDate]="end"
></novo-calendar>
<div class="calendar-footer" [hidden]="hideFooter">
<novo-button (click)="setToday()" class="today" size="small" data-automation-id="calendar-today">{{ labels.today }}</novo-button>
</div>
</div>
`,
styleUrls: ['./DatePicker.scss'],
})
export class NovoDatePickerElement implements ControlValueAccessor, OnInit {
/**
* The minimum year to allow selected in year select view
**/
@Input()
minYear: string | number;
/**
* The maximum year to allow selected in year select view
**/
@Input()
maxYear: string | number;
/**
* The minimum date that can be selected.
**/
@Input()
start: Date;
/**
* The maximum date that can be selected.
**/
@Input()
end: Date;
/**
* **Deprecated** Whether the date-picker is used outside of an overlay.
**/
@Input()
@BooleanInput()
inline: boolean;
/**
* Day of the week the calendar should display first, Sunday=0...Saturday=6
**/
@Input()
weekStart: Day = 0;
/**
* Certain dates that are already selected.
**/
@Input()
preselected: Date[] = [];
/**
* Whether the days for the previous and next month should be hidden.
**/
@Input()
@BooleanInput()
@HostBinding('class.hide-overflow-days')
public hideOverflowDays: boolean = false;
/**
* Whether the footer which contains `today` button should be hidden.
**/
@Input()
@BooleanInput()
public hideFooter: boolean = false;
@Input()
disabledDateMessage: string;
// Select callback for output
@Output()
onSelect: EventEmitter<any> = new EventEmitter(false);
_mode: DatePickerSelectModes = 'single';
_range: boolean;
_weekRangeSelect: boolean;
_numberOfMonths: number[] = [0];
/**
* Number of months to display at once.
* @default 1
**/
@Input()
get numberOfMonths(): number {
return this._numberOfMonths.length;
}
set numberOfMonths(value) {
this._numberOfMonths = Array.from(Array(Number(value)).keys());
}
/**
* How the date selection should work.
* @default single
**/
@Input()
get mode(): DatePickerSelectModes {
return this._mode;
}
set mode(value) {
if (this._mode !== value) {
this._mode = value;
}
}
/**
* **deprecated** please use `mode="range"`.
**/
@Input()
get range(): boolean {
return ['range', 'week'].includes(this.mode) || this._range;
}
set range(value) {
console.warn(`'range' property is deprecated, please use 'mode="range"'.`);
if (this._range !== value) {
this._range = value;
this.mode = 'range';
}
}
/**
* **deprecated** please use `mode="week"`.
**/
@Input()
get weekRangeSelect(): boolean {
return this._mode === 'week' || this._weekRangeSelect;
}
set weekRangeSelect(value) {
console.warn(`'weekRangeSelect' property is deprecated, please use 'mode="week"'.`);
if (this._weekRangeSelect !== value) {
this._weekRangeSelect = value;
this.mode = 'week';
}
}
// @HostBinding('style.width')
// get hb_width() {
// return this._sanitizer.bypassSecurityTrustStyle(`${this.numberOfMonths * 228}px`);
// }
model: modelTypes;
activeDate: Date;
_selection: Date[] = [];
preview: Date[] = [];
startDateLabel: string;
endDateLabel: string;
rangeSelectMode: rangeSelectModes = 'startDate';
_onChange: Function = () => {};
_onTouched: Function = () => {};
get selection(): Date[] {
return this._selection;
}
set selection(value) {
this._selection = value ? value.filter(isDate).map((d) => DateUtil.startOfDay(d)) : [];
}
constructor(
public labels: NovoLabelService,
private element: ElementRef,
private cdr: ChangeDetectorRef,
private _sanitizer: DomSanitizer,
) {}
ngOnInit() {
// Determine the year array
const now = new Date();
// Set labels
if (this.model) {
this.modelToSelection(this.model);
}
if (this.selection && this.selection.length) {
this.updateView(this.selection[0]);
}
}
updateView(date) {
const value: any = date ? new Date(date) : new Date();
this.activeDate = new Date(value);
}
updateSelection(selected: Date[], fireEvents = true) {
// Helpers.swallowEvent(event);
this.selection = selected;
this.startDateLabel = this.labels.formatDateWithFormat(this.selection[0], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
this.endDateLabel = this.labels.formatDateWithFormat(this.selection[1], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
if (fireEvents) {
switch (this.mode) {
case 'multiple':
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection);
this.model = this.selection;
break;
case 'range':
case 'week':
if (this.selection.filter(Boolean).length === 2) {
this.fireRangeSelect();
// Also, update the ngModel
const model = {
startDate: this.selection[0],
endDate: this.selection[1],
};
this._onChange(model);
this.model = model;
}
break;
case 'single':
default:
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection[0]);
this.model = this.selection[0];
break;
}
}
this.cdr.markForCheck();
}
eventData(date: Date) |
fireSelect() {
if (this.mode === 'multiple') {
this.onSelect.next(this.selection);
} else {
this.onSelect.next(this.eventData(this.selection[0]));
}
}
fireRangeSelect() {
// Make sure the start date is before the end date
if (this.selection.filter(Boolean).length === 2) {
const [start, end] = this.selection;
this.onSelect.next({
startDate: this.eventData(start),
endDate: this.eventData(end),
});
}
}
setToday() {
const tmp = new Date();
this.updateView(tmp);
this.updateSelection(Array.of(tmp));
}
toggleRangeSelect(range: rangeSelectModes): void {
this.rangeSelectMode = range;
if (range === 'startDate' && this.selection.length) {
this.updateView(this.selection[0]);
}
if (range === 'endDate' && this.selection.length === 2) {
this.updateView(this.selection[1]);
}
}
modelToSelection(model: modelTypes) {
switch (this.mode) {
case 'multiple':
this.selection = model as Date[];
break;
case 'range':
case 'week':
this.setRangeSelection();
case 'single':
default:
this.selection = [model as Date];
break;
}
}
// ValueAccessor Functions
writeValue(model: modelTypes): void {
this.model = model;
if (this.mode === 'multiple') {
this.selection = this.model as Date[];
}
if (this.mode === 'range') {
this.setRangeSelection();
}
if (Helpers.isDate(model)) {
this.updateView(model);
this.modelToSelection(model);
} else if (Helpers.isString(model)) {
const date = DateUtil.parse(model as any);
if (isValid(date)) {
this.updateView(date);
this.modelToSelection(date);
}
}
}
setRangeSelection() {
if (this.model?.hasOwnProperty('startDate')) {
// coming from standalone date picker
const range = this.model as RangeModel;
this.selection = [range.startDate, range.endDate].filter(Boolean);
} else if (this.model?.hasOwnProperty('min')) {
// coming from data-table filter where model end date is the beginning of the next day
const range = this.model as DataTableRangeModel;
this.selection = [range.min, subDays(range.max, 1)].filter(Boolean);
}
}
registerOnChange(fn: Function): void {
this._onChange = fn;
}
registerOnTouched(fn: Function): void {
this._onTouched = fn;
}
}
| {
return {
year: date.getFullYear(),
month: this.labels.formatDateWithFormat(date, { month: 'long' }),
day: this.labels.formatDateWithFormat(date, { weekday: 'long' }),
date,
};
} | identifier_body |
DatePicker.ts | // NG2
import { animate, state, style, transition, trigger } from '@angular/animations';
import { ChangeDetectorRef, Component, ElementRef, EventEmitter, forwardRef, HostBinding, Input, OnInit, Output } from '@angular/core';
import { ControlValueAccessor, NG_VALUE_ACCESSOR } from '@angular/forms';
import { DomSanitizer } from '@angular/platform-browser';
// Vendor
import { isDate, isValid, subDays } from 'date-fns';
// APP
import { NovoLabelService } from 'novo-elements/services';
import { BooleanInput, DataTableRangeModel, DatePickerSelectModes, DateUtil, Helpers, modelTypes, RangeModel, rangeSelectModes } from 'novo-elements/utils';
// Value accessor for the component (supports ngModel)
const DATE_PICKER_VALUE_ACCESSOR = {
provide: NG_VALUE_ACCESSOR,
useExisting: forwardRef(() => NovoDatePickerElement),
multi: true,
};
@Component({
selector: 'novo-date-picker',
providers: [DATE_PICKER_VALUE_ACCESSOR],
animations: [
trigger('startDateTextState', [
state(
'startDate',
style({
opacity: '1.0',
}),
),
state(
'endDate',
style({
opacity: '0.6',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('endDateTextState', [
state(
'startDate',
style({
opacity: '0.6',
}),
),
state(
'endDate',
style({
opacity: '1.0',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
trigger('indicatorState', [
state(
'startDate',
style({
transform: 'translateX(0%)',
}),
),
state(
'endDate',
style({
transform: 'translateX(100%)',
}),
),
transition('startDate <=> endDate', animate('200ms ease-in')),
]),
],
template: `
<div class="date-picker-container">
<div class="date-range-tabs" *ngIf="range" [class.week-select-mode]="weekRangeSelect">
<span
class="range-tab"
(click)="toggleRangeSelect('startDate')"
[@startDateTextState]="rangeSelectMode"
data-automation-id="calendar-start-date"
>{{ startDateLabel }}</span
>
<span
class="range-tab"
(click)="toggleRangeSelect('endDate')"
[@endDateTextState]="rangeSelectMode"
data-automation-id="calendar-end-date"
>{{ endDateLabel }}</span
>
<i class="indicator" [@indicatorState]="rangeSelectMode"></i>
</div>
<novo-calendar
[activeDate]="activeDate"
[(selected)]="selection"
(selectedChange)="updateSelection($event)"
[mode]="mode"
[numberOfMonths]="numberOfMonths"
[weekStartsOn]="weekStart"
[disabledDateMessage]="disabledDateMessage"
[minDate]="start"
[maxDate]="end"
></novo-calendar>
<div class="calendar-footer" [hidden]="hideFooter">
<novo-button (click)="setToday()" class="today" size="small" data-automation-id="calendar-today">{{ labels.today }}</novo-button>
</div>
</div>
`,
styleUrls: ['./DatePicker.scss'],
})
export class NovoDatePickerElement implements ControlValueAccessor, OnInit {
/**
* The minimum year to allow selected in year select view
**/
@Input()
minYear: string | number;
/**
* The maximum year to allow selected in year select view
**/
@Input()
maxYear: string | number;
/**
* The minimum date that can be selected.
**/
@Input()
start: Date;
/**
* The maximum date that can be selected.
**/
@Input()
end: Date;
/**
* **Deprecated** Whether the date-picker is used outside of an overlay.
**/
@Input()
@BooleanInput()
inline: boolean;
/**
* Day of the week the calendar should display first, Sunday=0...Saturday=6
**/
@Input()
weekStart: Day = 0;
/**
* Certain dates that are already selected.
**/
@Input()
preselected: Date[] = [];
/**
* Whether the days for the previous and next month should be hidden.
**/
@Input()
@BooleanInput()
@HostBinding('class.hide-overflow-days')
public hideOverflowDays: boolean = false;
/**
* Whether the footer which contains `today` button should be hidden.
**/
@Input()
@BooleanInput()
public hideFooter: boolean = false;
@Input()
disabledDateMessage: string;
// Select callback for output
@Output()
onSelect: EventEmitter<any> = new EventEmitter(false);
_mode: DatePickerSelectModes = 'single';
_range: boolean;
_weekRangeSelect: boolean;
_numberOfMonths: number[] = [0];
/**
* Number of months to display at once.
* @default 1
**/
@Input()
get numberOfMonths(): number {
return this._numberOfMonths.length;
}
set numberOfMonths(value) {
this._numberOfMonths = Array.from(Array(Number(value)).keys());
}
/**
* How the date selection should work.
* @default single
**/
@Input()
get mode(): DatePickerSelectModes {
return this._mode;
}
set mode(value) {
if (this._mode !== value) {
this._mode = value;
}
}
/**
* **deprecated** please use `mode="range"`.
**/
@Input()
get range(): boolean {
return ['range', 'week'].includes(this.mode) || this._range;
}
set range(value) {
console.warn(`'range' property is deprecated, please use 'mode="range"'.`);
if (this._range !== value) {
this._range = value;
this.mode = 'range';
}
}
/**
* **deprecated** please use `mode="week"`.
**/
@Input()
get weekRangeSelect(): boolean {
return this._mode === 'week' || this._weekRangeSelect;
}
set weekRangeSelect(value) {
console.warn(`'weekRangeSelect' property is deprecated, please use 'mode="week"'.`);
if (this._weekRangeSelect !== value) {
this._weekRangeSelect = value;
this.mode = 'week';
}
}
// @HostBinding('style.width')
// get hb_width() {
// return this._sanitizer.bypassSecurityTrustStyle(`${this.numberOfMonths * 228}px`);
// }
model: modelTypes;
activeDate: Date;
_selection: Date[] = [];
preview: Date[] = [];
startDateLabel: string;
endDateLabel: string;
rangeSelectMode: rangeSelectModes = 'startDate';
_onChange: Function = () => {};
_onTouched: Function = () => {};
get selection(): Date[] {
return this._selection;
}
set selection(value) {
this._selection = value ? value.filter(isDate).map((d) => DateUtil.startOfDay(d)) : [];
}
constructor(
public labels: NovoLabelService,
private element: ElementRef,
private cdr: ChangeDetectorRef,
private _sanitizer: DomSanitizer,
) {}
ngOnInit() {
// Determine the year array
const now = new Date();
// Set labels
if (this.model) {
this.modelToSelection(this.model);
}
if (this.selection && this.selection.length) {
this.updateView(this.selection[0]);
}
}
updateView(date) {
const value: any = date ? new Date(date) : new Date();
this.activeDate = new Date(value);
}
updateSelection(selected: Date[], fireEvents = true) {
// Helpers.swallowEvent(event);
this.selection = selected;
this.startDateLabel = this.labels.formatDateWithFormat(this.selection[0], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
this.endDateLabel = this.labels.formatDateWithFormat(this.selection[1], {
month: 'short',
day: '2-digit',
year: 'numeric',
});
if (fireEvents) {
switch (this.mode) {
case 'multiple':
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection);
this.model = this.selection;
break;
case 'range':
case 'week':
if (this.selection.filter(Boolean).length === 2) {
this.fireRangeSelect();
// Also, update the ngModel
const model = {
startDate: this.selection[0],
endDate: this.selection[1],
};
this._onChange(model);
this.model = model;
}
break;
case 'single':
default:
this.fireSelect();
// Also, update the ngModel
this._onChange(this.selection[0]);
this.model = this.selection[0];
break;
}
}
this.cdr.markForCheck();
}
eventData(date: Date) {
return {
year: date.getFullYear(),
month: this.labels.formatDateWithFormat(date, { month: 'long' }),
day: this.labels.formatDateWithFormat(date, { weekday: 'long' }),
date,
};
}
fireSelect() {
if (this.mode === 'multiple') {
this.onSelect.next(this.selection);
} else {
this.onSelect.next(this.eventData(this.selection[0]));
}
}
fireRangeSelect() {
// Make sure the start date is before the end date
if (this.selection.filter(Boolean).length === 2) {
const [start, end] = this.selection;
this.onSelect.next({
startDate: this.eventData(start),
endDate: this.eventData(end),
});
}
}
setToday() {
const tmp = new Date();
this.updateView(tmp);
this.updateSelection(Array.of(tmp));
}
toggleRangeSelect(range: rangeSelectModes): void {
this.rangeSelectMode = range;
if (range === 'startDate' && this.selection.length) {
this.updateView(this.selection[0]);
}
if (range === 'endDate' && this.selection.length === 2) {
this.updateView(this.selection[1]);
}
}
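// Maps an incoming model value onto the internal selection array according to
// the current selection mode.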
modelToSelection(model: modelTypes) {
switch (this.mode) {
case 'multiple':
this.selection = model as Date[];
break;
case 'range':
case 'week':
this.setRangeSelection();
break;
case 'single':
default:
this.selection = [model as Date];
break;
}
}
// ValueAccessor Functions
writeValue(model: modelTypes): void {
this.model = model;
if (this.mode === 'multiple') {
this.selection = this.model as Date[];
}
if (this.mode === 'range') {
this.setRangeSelection();
}
if (Helpers.isDate(model)) {
this.updateView(model);
this.modelToSelection(model);
} else if (Helpers.isString(model)) {
const date = DateUtil.parse(model as any);
if (isValid(date)) {
this.updateView(date);
this.modelToSelection(date);
}
}
}
setRangeSelection() {
if (this.model?.hasOwnProperty('startDate')) {
// coming from standalone date picker
const range = this.model as RangeModel;
this.selection = [range.startDate, range.endDate].filter(Boolean);
} else if (this.model?.hasOwnProperty('min')) {
// coming from data-table filter where model end date is the beginning of the next day
const range = this.model as DataTableRangeModel;
this.selection = [range.min, subDays(range.max, 1)].filter(Boolean);
}
}
registerOnChange(fn: Function): void {
this._onChange = fn;
}
registerOnTouched(fn: Function): void {
this._onTouched = fn;
}
}
cluster.go

package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/iotool"
"github.com/flynn/flynn/pkg/random"
)
type ClusterType uint8
const (
ClusterTypeDefault ClusterType = iota
ClusterTypeRelease
ClusterTypeNone
)
type BootConfig struct {
User string
Kernel string
Network string
NatIface string
Backend string
}
type Cluster struct {
ID string `json:"id"`
Instances instances `json:"instances"`
BackoffPeriod time.Duration `json:"backoff_period"`
ClusterDomain string `json:"cluster_domain"`
ControllerPin string `json:"controller_pin"`
ControllerKey string `json:"controller_key"`
RouterIP string `json:"router_ip"`
defaultInstances []*Instance
releaseInstances []*Instance
discMtx sync.Mutex
disc *discoverd.Client
bc BootConfig
vm *VMManager
out io.Writer
bridge *Bridge
rootFS string
}
func (c *Cluster) ControllerDomain() string {
return "controller." + c.ClusterDomain
}
type instances []*Instance
func (i instances) Get(id string) (*Instance, error) {
for _, inst := range i {
if inst.ID == id {
return inst, nil
}
}
return nil, fmt.Errorf("no such host: %s", id)
}
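// discoverdClient lazily creates a discoverd client for the given host IP and
// caches it for later calls.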
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
c.discMtx.Lock()
defer c.discMtx.Unlock()
if c.disc == nil {
c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
}
return c.disc
}
type Streams struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
func New(bc BootConfig, out io.Writer) *Cluster {
return &Cluster{
ID: random.String(8),
bc: bc,
out: out,
}
}
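// Example usage of New, BuildFlynn and Boot (illustrative sketch only; the
// user, kernel path, rootfs path, network and backend values below are
// assumptions, not values shipped with this package):
//
//	c := New(BootConfig{User: "ubuntu", Kernel: "vmlinuz", Network: "10.52.0.1/24", NatIface: "eth0", Backend: "libvirt-lxc"}, os.Stdout)
//	defer c.Shutdown()
//	if _, err := c.BuildFlynn("rootfs.img", "master", false, false); err != nil {
//		// handle build failure
//	}
//	res, err := c.Boot(ClusterTypeDefault, 3, os.Stderr, true)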
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
c := New(bc, out)
defer c.Shutdown()
return c.BuildFlynn(rootFS, commit, merge, false)
}
func (c *Cluster) log(a ...interface{}) (int, error) {
return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
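// BuildFlynn boots a build VM backed by a copy-on-write clone of rootFS, builds
// the given commit inside it (optionally merging master and running the unit
// tests) and returns the path of the resulting root filesystem image.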
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
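// Boot starts count VMs of the given type, launches flynn-host on each of them
// and bootstraps layer 1, returning the controller domain, pin and key.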
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
return instances[0], err
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
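// startVMs boots count VMs backed by copy-on-write clones of rootFS and records
// them in the cluster's instance lists.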
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
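// startFlynnHost renders the backend-specific start script for the instance and
// runs it to launch the flynn-host daemon, using the initial instances as etcd
// peers.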
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
URL: "https://" + c.ControllerDomain(),
Key: c.ControllerKey,
GitHost: c.ClusterDomain + ":2222",
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf("killing instance %d [id: %s]\n", i, inst.ID)
if err := inst.Kill(); err != nil {
c.logf("error killing instance %d: %s\n", i, err)
}
}
if c.bridge != nil {
c.logf("deleting network bridge %s\n", c.bridge.name)
if err := deleteBridge(c.bridge); err != nil {
c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
}
c.bridge = nil
}
}
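// flynnBuildScript is run inside the build VM: it clones or updates the Flynn
// repository, checks out the requested commit, builds the binaries and installs
// them along with the host and bootstrap manifests.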
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch GitHub PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "[email protected]"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp host/bin/manifest.json /etc/flynn-host.json
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
var b bytes.Buffer
flynnBuildScript.Execute(&b, buildData{commit, merge})
return inst.RunWithTimeout("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out}, 30*time.Minute)
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
EtcdProxy bool
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
ETCD_NAME={{ .ID }} \
ETCD_INITIAL_CLUSTER={{ .Peers }} \
ETCD_INITIAL_CLUSTER_STATE=new \
{{ if .EtcdProxy }} ETCD_PROXY=on {{ end }} \
flynn-host \
daemon \
--id {{ .ID }} \
--manifest /etc/flynn-host.json \
--external {{ .IP }} \
--force \
--backend libvirt-lxc \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
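// bootstrapLayer1 runs flynn-host bootstrap on the first instance, reads the
// JSON progress stream to capture the controller TLS pin, and records the
// router IP in the local /etc/hosts.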
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
inst := instances[0]
c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
c.ControllerKey = random.String(16)
c.BackoffPeriod = 5 * time.Second
rd, wr := io.Pipe()
var cmdErr error
go func() {
command := fmt.Sprintf(
"DISCOVERD=%s:1111 CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d /etc/flynn-bootstrap.json",
inst.IP, c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances),
)
cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: os.Stderr})
wr.Close()
}()
// grab the controller tls pin from the bootstrap output
var cert controllerCert
dec := json.NewDecoder(rd)
for {
var msg bootstrapMsg
if err := dec.Decode(&msg); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
}
c.log("bootstrap ===>", msg.Id, msg.State)
if msg.State == "error" {
c.log(msg.Error)
}
if msg.Id == "controller-cert" && msg.State == "done" {
json.Unmarshal(msg.Data, &cert)
}
}
if cmdErr != nil {
return cmdErr
}
if cert.Pin == "" {
return errors.New("could not determine controller cert from bootstrap output")
}
c.ControllerPin = cert.Pin
// grab the router IP from discoverd
disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
leader, err := disc.Service("router-api").Leader()
if err != nil {
return fmt.Errorf("could not detect router ip: %s", err)
}
if err = setLocalDNS([]string{c.ClusterDomain, c.ControllerDomain()}, leader.Host()); err != nil {
return fmt.Errorf("could not set cluster DNS entries: %s", err)
}
c.RouterIP = leader.Host()
return nil
}
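// setLocalDNS points the given domains at ip in /etc/hosts, replacing an
// existing entry if one is already present.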
func setLocalDNS(domains []string, ip string) error {
command := fmt.Sprintf(
`grep -q "^%[1]s" /etc/hosts && sed "s/^%[1]s.*/%[1]s %s/" -i /etc/hosts || echo %[1]s %s >> /etc/hosts`,
ip, strings.Join(domains, " "),
)
cmd := exec.Command("bash", "-c", command)
return cmd.Run()
}
func lookupUser(name string) (int, int, error) {
u, err := user.Lookup(name)
if err != nil {
return 0, 0, err
}
uid, _ := strconv.Atoi(u.Uid)
gid, _ := strconv.Atoi(u.Gid)
return uid, gid, nil
}
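// DumpLogs writes process listings and job logs from every instance to w,
// giving up after 60 seconds if an instance stops producing output.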
func (c *Cluster) DumpLogs(w io.Writer) {
tw := iotool.NewTimeoutWriter(w, 60*time.Second)
c.dumpLogs(tw)
tw.Finished()
}
func (c *Cluster) dumpLogs(w io.Writer) {
streams := &Streams{Stdout: w, Stderr: w}
run := func(inst *Instance, cmd string) error {
fmt.Fprint(w, "\n\n***** ***** ***** ***** ***** ***** ***** ***** ***** *****\n\n")
fmt.Fprintln(w, "HostID:", inst.ID, "-", cmd)
fmt.Fprintln(w)
err := inst.Run(cmd, streams)
fmt.Fprintln(w)
return err
}
fmt.Fprint(w, "\n\n***** ***** ***** DUMPING ALL LOGS ***** ***** *****\n\n")
for _, inst := range c.Instances {
run(inst, "ps faux")
run(inst, "cat /tmp/flynn-host.log")
run(inst, "cat /tmp/debug-info.log")
run(inst, "sudo cat /var/log/libvirt/libvirtd.log")
}
printLogs := func(instances []*Instance) {
fallback := func() {
fmt.Fprintf(w, "\n*** Error getting job logs via flynn-host, falling back to tail log dump\n\n")
for _, inst := range instances {
run(inst, "sudo bash -c 'tail -n +1 /var/log/flynn/**/*.log'")
}
}
run(instances[0], "flynn-host ps -a")
var out bytes.Buffer
if err := instances[0].Run("flynn-host ps -a -q", &Streams{Stdout: &out, Stderr: w}); err != nil {
io.Copy(w, &out)
fallback()
return
}
ids := strings.Split(strings.TrimSpace(out.String()), "\n")
for _, id := range ids {
if err := run(instances[0], fmt.Sprintf("flynn-host inspect %s", id)); err != nil {
fallback()
return
}
run(instances[0], fmt.Sprintf("flynn-host log --init %s", id))
}
}
printLogs(c.defaultInstances)
if len(c.releaseInstances) > 0 {
printLogs(c.releaseInstances)
}
}
cluster.go

package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/iotool"
"github.com/flynn/flynn/pkg/random"
)
type ClusterType uint8
const (
ClusterTypeDefault ClusterType = iota
ClusterTypeRelease
ClusterTypeNone
)
type BootConfig struct {
User string
Kernel string
Network string
NatIface string
Backend string
}
type Cluster struct {
ID string `json:"id"`
Instances instances `json:"instances"`
BackoffPeriod time.Duration `json:"backoff_period"`
ClusterDomain string `json:"cluster_domain"`
ControllerPin string `json:"controller_pin"`
ControllerKey string `json:"controller_key"`
RouterIP string `json:"router_ip"`
defaultInstances []*Instance
releaseInstances []*Instance
discMtx sync.Mutex
disc *discoverd.Client
bc BootConfig
vm *VMManager
out io.Writer
bridge *Bridge
rootFS string
}
func (c *Cluster) ControllerDomain() string {
return "controller." + c.ClusterDomain
}
type instances []*Instance
func (i instances) Get(id string) (*Instance, error) {
for _, inst := range i {
if inst.ID == id {
return inst, nil
}
}
return nil, fmt.Errorf("no such host: %s", id)
}
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
c.discMtx.Lock()
defer c.discMtx.Unlock()
if c.disc == nil {
c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
}
return c.disc
}
type Streams struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
func New(bc BootConfig, out io.Writer) *Cluster {
return &Cluster{
ID: random.String(8),
bc: bc,
out: out,
}
}
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
c := New(bc, out)
defer c.Shutdown()
return c.BuildFlynn(rootFS, commit, merge, false)
}
func (c *Cluster) log(a ...interface{}) (int, error) {
return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
return instances[0], err
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
URL: "https://" + c.ControllerDomain(),
Key: c.ControllerKey,
GitHost: c.ClusterDomain + ":2222",
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf("killing instance %d [id: %s]\n", i, inst.ID)
if err := inst.Kill(); err != nil {
c.logf("error killing instance %d: %s\n", i, err)
}
}
if c.bridge != nil {
c.logf("deleting network bridge %s\n", c.bridge.name)
if err := deleteBridge(c.bridge); err != nil {
c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
}
c.bridge = nil
}
}
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch GitHub PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "[email protected]"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp host/bin/manifest.json /etc/flynn-host.json
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
var b bytes.Buffer
flynnBuildScript.Execute(&b, buildData{commit, merge})
return inst.RunWithTimeout("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out}, 30*time.Minute)
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
EtcdProxy bool
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
ETCD_NAME={{ .ID }} \
ETCD_INITIAL_CLUSTER={{ .Peers }} \
ETCD_INITIAL_CLUSTER_STATE=new \
{{ if .EtcdProxy }} ETCD_PROXY=on {{ end }} \
flynn-host \
daemon \
--id {{ .ID }} \
--manifest /etc/flynn-host.json \
--external {{ .IP }} \
--force \
--backend libvirt-lxc \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
inst := instances[0]
c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
c.ControllerKey = random.String(16)
c.BackoffPeriod = 5 * time.Second
rd, wr := io.Pipe()
var cmdErr error
go func() {
command := fmt.Sprintf(
"DISCOVERD=%s:1111 CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d /etc/flynn-bootstrap.json",
inst.IP, c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances),
)
cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: os.Stderr})
wr.Close()
}()
// grab the controller tls pin from the bootstrap output
var cert controllerCert
dec := json.NewDecoder(rd)
for {
var msg bootstrapMsg
if err := dec.Decode(&msg); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
}
c.log("bootstrap ===>", msg.Id, msg.State)
if msg.State == "error" {
c.log(msg.Error)
}
if msg.Id == "controller-cert" && msg.State == "done" {
json.Unmarshal(msg.Data, &cert)
}
}
if cmdErr != nil {
return cmdErr
}
if cert.Pin == "" {
return errors.New("could not determine controller cert from bootstrap output")
}
c.ControllerPin = cert.Pin
// grab the router IP from discoverd
disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
leader, err := disc.Service("router-api").Leader()
if err != nil {
return fmt.Errorf("could not detect router ip: %s", err)
}
if err = setLocalDNS([]string{c.ClusterDomain, c.ControllerDomain()}, leader.Host()); err != nil {
return fmt.Errorf("could not set cluster DNS entries: %s", err)
}
c.RouterIP = leader.Host()
return nil
}
func setLocalDNS(domains []string, ip string) error {
command := fmt.Sprintf(
`grep -q "^%[1]s" /etc/hosts && sed "s/^%[1]s.*/%[1]s %s/" -i /etc/hosts || echo %[1]s %s >> /etc/hosts`,
ip, strings.Join(domains, " "),
)
cmd := exec.Command("bash", "-c", command)
return cmd.Run()
}
func lookupUser(name string) (int, int, error) {
u, err := user.Lookup(name)
if err != nil {
return 0, 0, err
}
uid, _ := strconv.Atoi(u.Uid)
gid, _ := strconv.Atoi(u.Gid)
return uid, gid, nil
}
func (c *Cluster) DumpLogs(w io.Writer) {
tw := iotool.NewTimeoutWriter(w, 60*time.Second)
c.dumpLogs(tw)
tw.Finished()
}
func (c *Cluster) dumpLogs(w io.Writer) {
streams := &Streams{Stdout: w, Stderr: w}
run := func(inst *Instance, cmd string) error {
fmt.Fprint(w, "\n\n***** ***** ***** ***** ***** ***** ***** ***** ***** *****\n\n")
fmt.Fprintln(w, "HostID:", inst.ID, "-", cmd)
fmt.Fprintln(w)
err := inst.Run(cmd, streams)
fmt.Fprintln(w)
return err
}
fmt.Fprint(w, "\n\n***** ***** ***** DUMPING ALL LOGS ***** ***** *****\n\n")
for _, inst := range c.Instances {
run(inst, "ps faux")
run(inst, "cat /tmp/flynn-host.log")
run(inst, "cat /tmp/debug-info.log")
run(inst, "sudo cat /var/log/libvirt/libvirtd.log")
}
printLogs := func(instances []*Instance) {
fallback := func() {
fmt.Fprintf(w, "\n*** Error getting job logs via flynn-host, falling back to tail log dump\n\n")
for _, inst := range instances {
run(inst, "sudo bash -c 'tail -n +1 /var/log/flynn/**/*.log'")
}
}
run(instances[0], "flynn-host ps -a")
var out bytes.Buffer
if err := instances[0].Run("flynn-host ps -a -q", &Streams{Stdout: &out, Stderr: w}); err != nil {
io.Copy(w, &out)
fallback()
return
}
ids := strings.Split(strings.TrimSpace(out.String()), "\n")
for _, id := range ids {
if err := run(instances[0], fmt.Sprintf("flynn-host inspect %s", id)); err != nil {
fallback()
return
}
run(instances[0], fmt.Sprintf("flynn-host log --init %s", id))
}
}
printLogs(c.defaultInstances)
if len(c.releaseInstances) > 0 {
printLogs(c.releaseInstances)
}
}
cluster.go

package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/iotool"
"github.com/flynn/flynn/pkg/random"
)
type ClusterType uint8
const (
ClusterTypeDefault ClusterType = iota
ClusterTypeRelease
ClusterTypeNone
)
type BootConfig struct {
User string
Kernel string
Network string
NatIface string
Backend string
}
type Cluster struct {
ID string `json:"id"`
Instances instances `json:"instances"`
BackoffPeriod time.Duration `json:"backoff_period"`
ClusterDomain string `json:"cluster_domain"`
ControllerPin string `json:"controller_pin"`
ControllerKey string `json:"controller_key"`
RouterIP string `json:"router_ip"`
defaultInstances []*Instance
releaseInstances []*Instance
discMtx sync.Mutex
disc *discoverd.Client
bc BootConfig
vm *VMManager
out io.Writer
bridge *Bridge
rootFS string
}
func (c *Cluster) ControllerDomain() string {
return "controller." + c.ClusterDomain
}
type instances []*Instance
func (i instances) Get(id string) (*Instance, error) {
for _, inst := range i {
if inst.ID == id {
return inst, nil
}
}
return nil, fmt.Errorf("no such host: %s", id)
}
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
c.discMtx.Lock()
defer c.discMtx.Unlock()
if c.disc == nil {
c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
}
return c.disc
}
type Streams struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
func New(bc BootConfig, out io.Writer) *Cluster {
return &Cluster{
ID: random.String(8),
bc: bc,
out: out,
}
}
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
c := New(bc, out)
defer c.Shutdown()
return c.BuildFlynn(rootFS, commit, merge, false)
}
func (c *Cluster) log(a ...interface{}) (int, error) {
return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
return instances[0], err
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
URL: "https://" + c.ControllerDomain(),
Key: c.ControllerKey,
GitHost: c.ClusterDomain + ":2222",
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf("killing instance %d [id: %s]\n", i, inst.ID)
if err := inst.Kill(); err != nil {
c.logf("error killing instance %d: %s\n", i, err)
}
}
if c.bridge != nil {
c.logf("deleting network bridge %s\n", c.bridge.name)
if err := deleteBridge(c.bridge); err != nil {
c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
}
c.bridge = nil
}
}
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch GitHub PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "[email protected]"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp host/bin/manifest.json /etc/flynn-host.json
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
var b bytes.Buffer
flynnBuildScript.Execute(&b, buildData{commit, merge})
return inst.RunWithTimeout("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out}, 30*time.Minute)
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
EtcdProxy bool
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
ETCD_NAME={{ .ID }} \
ETCD_INITIAL_CLUSTER={{ .Peers }} \
ETCD_INITIAL_CLUSTER_STATE=new \
{{ if .EtcdProxy }} ETCD_PROXY=on {{ end }} \
flynn-host \
daemon \
--id {{ .ID }} \
--manifest /etc/flynn-host.json \
--external {{ .IP }} \
--force \
--backend libvirt-lxc \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
inst := instances[0]
c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
c.ControllerKey = random.String(16)
c.BackoffPeriod = 5 * time.Second
rd, wr := io.Pipe()
var cmdErr error
go func() {
command := fmt.Sprintf(
"DISCOVERD=%s:1111 CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d /etc/flynn-bootstrap.json",
inst.IP, c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances),
)
cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: os.Stderr})
wr.Close()
}()
// grab the controller tls pin from the bootstrap output
var cert controllerCert
dec := json.NewDecoder(rd)
for {
var msg bootstrapMsg
if err := dec.Decode(&msg); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
}
c.log("bootstrap ===>", msg.Id, msg.State)
if msg.State == "error" {
c.log(msg.Error)
}
if msg.Id == "controller-cert" && msg.State == "done" {
json.Unmarshal(msg.Data, &cert)
}
}
if cmdErr != nil {
return cmdErr
}
if cert.Pin == "" {
return errors.New("could not determine controller cert from bootstrap output")
}
c.ControllerPin = cert.Pin
// grab the router IP from discoverd
disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
leader, err := disc.Service("router-api").Leader()
if err != nil {
return fmt.Errorf("could not detect router ip: %s", err)
}
if err = setLocalDNS([]string{c.ClusterDomain, c.ControllerDomain()}, leader.Host()); err != nil {
return fmt.Errorf("could not set cluster DNS entries: %s", err)
}
c.RouterIP = leader.Host()
return nil
}
func setLocalDNS(domains []string, ip string) error {
command := fmt.Sprintf(
`grep -q "^%[1]s" /etc/hosts && sed "s/^%[1]s.*/%[1]s %s/" -i /etc/hosts || echo %[1]s %s >> /etc/hosts`,
ip, strings.Join(domains, " "),
)
cmd := exec.Command("bash", "-c", command)
return cmd.Run()
}
func lookupUser(name string) (int, int, error) {
u, err := user.Lookup(name)
if err != nil {
return 0, 0, err
}
uid, _ := strconv.Atoi(u.Uid)
gid, _ := strconv.Atoi(u.Gid)
return uid, gid, nil
}
func (c *Cluster) DumpLogs(w io.Writer) {
tw := iotool.NewTimeoutWriter(w, 60*time.Second)
c.dumpLogs(tw)
tw.Finished()
}
func (c *Cluster) dumpLogs(w io.Writer) {
streams := &Streams{Stdout: w, Stderr: w}
run := func(inst *Instance, cmd string) error {
fmt.Fprint(w, "\n\n***** ***** ***** ***** ***** ***** ***** ***** ***** *****\n\n")
fmt.Fprintln(w, "HostID:", inst.ID, "-", cmd)
fmt.Fprintln(w)
err := inst.Run(cmd, streams)
fmt.Fprintln(w)
return err
}
fmt.Fprint(w, "\n\n***** ***** ***** DUMPING ALL LOGS ***** ***** *****\n\n")
for _, inst := range c.Instances {
run(inst, "ps faux")
run(inst, "cat /tmp/flynn-host.log")
run(inst, "cat /tmp/debug-info.log")
run(inst, "sudo cat /var/log/libvirt/libvirtd.log")
}
printLogs := func(instances []*Instance) {
fallback := func() {
fmt.Fprintf(w, "\n*** Error getting job logs via flynn-host, falling back to tail log dump\n\n")
for _, inst := range instances {
run(inst, "sudo bash -c 'tail -n +1 /var/log/flynn/**/*.log'")
}
}
run(instances[0], "flynn-host ps -a")
var out bytes.Buffer
if err := instances[0].Run("flynn-host ps -a -q", &Streams{Stdout: &out, Stderr: w}); err != nil {
io.Copy(w, &out)
fallback()
return
}
ids := strings.Split(strings.TrimSpace(out.String()), "\n")
for _, id := range ids {
if err := run(instances[0], fmt.Sprintf("flynn-host inspect %s", id)); err != nil {
fallback()
return
}
run(instances[0], fmt.Sprintf("flynn-host log --init %s", id))
}
}
printLogs(c.defaultInstances)
if len(c.releaseInstances) > 0 {
printLogs(c.releaseInstances)
}
}
cluster.go

package cluster
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flynn/flynn/cli/config"
"github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/iotool"
"github.com/flynn/flynn/pkg/random"
)
type ClusterType uint8
const (
ClusterTypeDefault ClusterType = iota
ClusterTypeRelease
ClusterTypeNone
)
type BootConfig struct {
User string
Kernel string
Network string
NatIface string
Backend string
}
type Cluster struct {
ID string `json:"id"`
Instances instances `json:"instances"`
BackoffPeriod time.Duration `json:"backoff_period"`
ClusterDomain string `json:"cluster_domain"`
ControllerPin string `json:"controller_pin"`
ControllerKey string `json:"controller_key"`
RouterIP string `json:"router_ip"`
defaultInstances []*Instance
releaseInstances []*Instance
discMtx sync.Mutex
disc *discoverd.Client
bc BootConfig
vm *VMManager
out io.Writer
bridge *Bridge
rootFS string
}
func (c *Cluster) ControllerDomain() string {
return "controller." + c.ClusterDomain
}
type instances []*Instance
func (i instances) Get(id string) (*Instance, error) {
for _, inst := range i {
if inst.ID == id {
return inst, nil
}
}
return nil, fmt.Errorf("no such host: %s", id)
}
func (c *Cluster) discoverdClient(ip string) *discoverd.Client {
c.discMtx.Lock()
defer c.discMtx.Unlock()
if c.disc == nil {
c.disc = discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", ip))
}
return c.disc
}
type Streams struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
func New(bc BootConfig, out io.Writer) *Cluster {
return &Cluster{
ID: random.String(8),
bc: bc,
out: out,
}
}
func BuildFlynn(bc BootConfig, rootFS, commit string, merge bool, out io.Writer) (string, error) {
c := New(bc, out)
defer c.Shutdown()
return c.BuildFlynn(rootFS, commit, merge, false)
}
func (c *Cluster) log(a ...interface{}) (int, error) {
return fmt.Fprintln(c.out, append([]interface{}{"++", time.Now().Format("15:04:05.000")}, a...)...)
}
func (c *Cluster) logf(f string, a ...interface{}) (int, error) {
return fmt.Fprintf(c.out, strings.Join([]string{"++", time.Now().Format("15:04:05.000"), f}, " "), a...)
}
func (c *Cluster) BuildFlynn(rootFS, commit string, merge bool, runTests bool) (string, error) {
c.log("Building Flynn...")
if err := c.setup(); err != nil {
return "", err
}
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return "", err
}
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "4096",
Cores: 8,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: false},
},
})
if err != nil {
return build.Drive("hda").FS, err
}
c.log("Booting build instance...")
if err := build.Start(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error starting build instance: %s", err)
}
c.log("Waiting for instance to boot...")
if err := buildFlynn(build, commit, merge, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("error running build script: %s", err)
}
if runTests {
if err := runUnitTests(build, c.out); err != nil {
build.Kill()
return build.Drive("hda").FS, fmt.Errorf("unit tests failed: %s", err)
}
}
if err := build.Shutdown(); err != nil {
return build.Drive("hda").FS, fmt.Errorf("error while stopping build instance: %s", err)
}
c.rootFS = build.Drive("hda").FS
return c.rootFS, nil
}
type BootResult struct {
ControllerDomain string
ControllerPin string
ControllerKey string
Instances []*Instance
}
func (c *Cluster) Boot(typ ClusterType, count int, dumpLogs io.Writer, killOnFailure bool) (res *BootResult, err error) {
if err := c.setup(); err != nil {
return nil, err
}
defer func() {
if err != nil {
if dumpLogs != nil && len(c.Instances) > 0 {
c.DumpLogs(dumpLogs)
}
if killOnFailure {
c.Shutdown()
}
}
}()
c.log("Booting", count, "VMs")
instances, err := c.startVMs(typ, c.rootFS, count, true)
if err != nil {
return nil, err
}
for _, inst := range instances {
if err := c.startFlynnHost(inst, instances); err != nil {
return nil, err
}
}
c.log("Bootstrapping layer 1...")
if err := c.bootstrapLayer1(instances); err != nil {
return nil, err
}
return &BootResult{
ControllerDomain: c.ControllerDomain(),
ControllerPin: c.ControllerPin,
ControllerKey: c.ControllerKey,
Instances: instances,
}, nil
}
func (c *Cluster) BridgeIP() string {
if c.bridge == nil {
return ""
}
return c.bridge.IP()
}
func (c *Cluster) AddHost() (*Instance, error) {
if c.rootFS == "" {
return nil, errors.New("cluster not yet booted")
}
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeDefault, c.rootFS, 1, false)
if err != nil {
return nil, err
}
inst := instances[0]
if err := c.startFlynnHost(inst, c.defaultInstances); err != nil {
return nil, err
}
return inst, err
}
func (c *Cluster) AddVanillaHost(rootFS string) (*Instance, error) {
c.log("Booting 1 VM")
instances, err := c.startVMs(ClusterTypeNone, rootFS, 1, false)
return instances[0], err
}
// RemoveHost stops flynn-host on the instance but leaves it running so the logs
// are still available if we need to dump them later.
func (c *Cluster) RemoveHost(id string) error {
inst, err := c.Instances.Get(id)
if err != nil {
return err
}
c.log("removing host", id)
// Clean shutdown requires waiting for that host to unadvertise on discoverd.
// Specifically: Wait for router-api services to disappear to indicate host
// removal (rather than using StreamHostEvents), so that other
// tests won't try and connect to this host via service discovery.
ip := c.defaultInstances[0].IP
events := make(chan *discoverd.Event)
stream, err := c.discoverdClient(ip).Service("router-api").Watch(events)
if err != nil {
return err
}
defer stream.Close()
// ssh into the host and tell the flynn-host daemon to stop
var cmd string
switch c.bc.Backend {
case "libvirt-lxc":
// manually kill containers after stopping flynn-host due to https://github.com/flynn/flynn/issues/1177
cmd = "sudo start-stop-daemon --stop --pidfile /var/run/flynn-host.pid --retry 15 && (virsh -c lxc:/// list --name | xargs -L 1 virsh -c lxc:/// destroy || true)"
}
if err := inst.Run(cmd, nil); err != nil {
return err
}
loop:
for {
select {
case event := <-events:
if event.Kind == discoverd.EventKindDown {
break loop
}
case <-time.After(20 * time.Second):
return fmt.Errorf("timed out waiting for host removal")
}
}
return nil
}
func (c *Cluster) Size() int {
return len(c.Instances)
}
func (c *Cluster) startVMs(typ ClusterType, rootFS string, count int, initial bool) ([]*Instance, error) {
uid, gid, err := lookupUser(c.bc.User)
if err != nil {
return nil, err
}
instances := make([]*Instance, count)
for i := 0; i < count; i++ {
inst, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel,
User: uid,
Group: gid,
Memory: "2048",
Cores: 2,
Drives: map[string]*VMDrive{
"hda": {FS: rootFS, COW: true, Temp: true},
},
})
if err != nil {
return nil, fmt.Errorf("error creating instance %d: %s", i, err)
}
if err = inst.Start(); err != nil {
return nil, fmt.Errorf("error starting instance %d: %s", i, err)
}
inst.initial = initial
instances[i] = inst
c.Instances = append(c.Instances, inst)
switch typ {
case ClusterTypeDefault:
c.defaultInstances = append(c.defaultInstances, inst)
case ClusterTypeRelease:
c.releaseInstances = append(c.releaseInstances, inst)
}
}
return instances, nil
}
func (c *Cluster) startFlynnHost(inst *Instance, peerInstances []*Instance) error {
tmpl, ok := flynnHostScripts[c.bc.Backend]
if !ok {
return fmt.Errorf("unknown host backend: %s", c.bc.Backend)
}
peers := make([]string, 0, len(peerInstances))
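// Build the etcd initial-cluster list from the initial hosts, each entry in the
// form "<id>=http://<ip>:2380" (e.g. "host0=http://10.0.0.1:2380", illustrative values).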
for _, inst := range peerInstances {
if !inst.initial {
continue
}
peers = append(peers, fmt.Sprintf("%s=http://%s:2380", inst.ID, inst.IP))
}
var script bytes.Buffer
data := hostScriptData{
ID: inst.ID,
IP: inst.IP,
Peers: strings.Join(peers, ","),
EtcdProxy: !inst.initial,
}
tmpl.Execute(&script, data)
c.logf("Starting flynn-host on %s [id: %s]\n", inst.IP, inst.ID)
return inst.Run("bash", &Streams{Stdin: &script, Stdout: c.out, Stderr: os.Stderr})
}
func (c *Cluster) setup() error {
if _, err := os.Stat(c.bc.Kernel); os.IsNotExist(err) {
return fmt.Errorf("cluster: not a kernel file: %s", c.bc.Kernel)
}
if c.bridge == nil {
var err error
name := "flynnbr." + random.String(5)
c.logf("creating network bridge %s\n", name)
c.bridge, err = createBridge(name, c.bc.Network, c.bc.NatIface)
if err != nil {
return fmt.Errorf("could not create network bridge: %s", err)
}
}
c.vm = NewVMManager(c.bridge)
return nil
}
func (c *Cluster) Run(command string, s *Streams) error {
return c.run(command, s, nil)
}
func (c *Cluster) RunWithEnv(command string, s *Streams, env map[string]string) error {
return c.run(command, s, env)
}
func (c *Cluster) run(command string, s *Streams, env map[string]string) error {
if len(c.Instances) == 0 {
return errors.New("no booted servers in cluster")
}
return c.Instances[0].RunWithEnv(command, s, env)
}
func (c *Cluster) CLIConfig() (*config.Config, error) {
conf := &config.Config{}
s := &config.Cluster{
Name: "default",
URL: "https://" + c.ControllerDomain(),
Key: c.ControllerKey,
GitHost: c.ClusterDomain + ":2222",
TLSPin: c.ControllerPin,
}
if err := conf.Add(s, true /*force*/); err != nil {
return nil, err
}
return conf, nil
}
func (c *Cluster) Shutdown() {
for i, inst := range c.Instances {
c.logf("killing instance %d [id: %s]\n", i, inst.ID)
if err := inst.Kill(); err != nil {
c.logf("error killing instance %d: %s\n", i, err)
}
}
if c.bridge != nil {
c.logf("deleting network bridge %s\n", c.bridge.name)
if err := deleteBridge(c.bridge); err != nil {
c.logf("error deleting network bridge %s: %s\n", c.bridge.name, err)
}
c.bridge = nil
}
}
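// flynnBuildScript is the bash script run on a build VM to clone, check out and build
// flynn; the trailing [1:] strips the leading newline from the raw string literal.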
var flynnBuildScript = template.Must(template.New("flynn-build").Parse(`
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
if [ ! -d $flynn ]; then
git clone https://github.com/flynn/flynn $flynn
fi
cd $flynn
# Also fetch Github PR commits
if ! git config --get-all remote.origin.fetch | grep -q '^+refs/pull'; then
git config --add remote.origin.fetch '+refs/pull/*/head:refs/remotes/origin/pr/*'
fi
git fetch
git checkout --quiet {{ .Commit }}
{{ if .Merge }}
git config user.email "[email protected]"
git config user.name "CI"
git merge origin/master
{{ end }}
test/scripts/wait-for-docker
make
if [[ -f test/scripts/debug-info.sh ]]; then
sudo cp test/scripts/debug-info.sh /usr/local/bin/debug-info.sh
fi
sudo cp host/bin/flynn-* /usr/local/bin
sudo cp host/bin/manifest.json /etc/flynn-host.json
sudo cp bootstrap/bin/manifest.json /etc/flynn-bootstrap.json
`[1:]))
type buildData struct {
Commit string
Merge bool
}
func buildFlynn(inst *Instance, commit string, merge bool, out io.Writer) error {
var b bytes.Buffer
flynnBuildScript.Execute(&b, buildData{commit, merge})
return inst.RunWithTimeout("bash", &Streams{Stdin: &b, Stdout: out, Stderr: out}, 30*time.Minute)
}
var flynnUnitTestScript = `
#!/bin/bash
set -e -x
export GOPATH=~/go
flynn=$GOPATH/src/github.com/flynn/flynn
cd $flynn
if [[ -f test/scripts/test-unit.sh ]]; then
timeout --signal=QUIT --kill-after=10 5m test/scripts/test-unit.sh
fi
`[1:]
func runUnitTests(inst *Instance, out io.Writer) error {
return inst.Run("bash", &Streams{Stdin: bytes.NewBufferString(flynnUnitTestScript), Stdout: out, Stderr: out})
}
type hostScriptData struct {
ID string
IP string
Peers string
EtcdProxy bool
}
var flynnHostScripts = map[string]*template.Template{
"libvirt-lxc": template.Must(template.New("flynn-host-libvirt").Parse(`
if [[ -f /usr/local/bin/debug-info.sh ]]; then
/usr/local/bin/debug-info.sh &>/tmp/debug-info.log &
fi
sudo start-stop-daemon \
--start \
--background \
--no-close \
--make-pidfile \
--pidfile /var/run/flynn-host.pid \
--exec /usr/bin/env \
-- \
ETCD_NAME={{ .ID }} \
ETCD_INITIAL_CLUSTER={{ .Peers }} \
ETCD_INITIAL_CLUSTER_STATE=new \
{{ if .EtcdProxy }} ETCD_PROXY=on {{ end }} \
flynn-host \
daemon \
--id {{ .ID }} \
--manifest /etc/flynn-host.json \
--external {{ .IP }} \
--force \
--backend libvirt-lxc \
&>/tmp/flynn-host.log
`[1:])),
}
type bootstrapMsg struct {
Id string `json:"id"`
State string `json:"state"`
Data json.RawMessage `json:"data"`
Error string `json:"error"`
}
type controllerCert struct {
Pin string `json:"pin"`
}
func (c *Cluster) bootstrapLayer1(instances []*Instance) error {
inst := instances[0]
c.ClusterDomain = fmt.Sprintf("flynn-%s.local", random.String(16))
c.ControllerKey = random.String(16)
c.BackoffPeriod = 5 * time.Second
rd, wr := io.Pipe()
var cmdErr error
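// Run the bootstrap command in the background, streaming its JSON progress
// output through the pipe so it can be decoded below.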
go func() {
command := fmt.Sprintf(
"DISCOVERD=%s:1111 CLUSTER_DOMAIN=%s CONTROLLER_KEY=%s BACKOFF_PERIOD=%fs flynn-host bootstrap --json --min-hosts=%d /etc/flynn-bootstrap.json",
inst.IP, c.ClusterDomain, c.ControllerKey, c.BackoffPeriod.Seconds(), len(instances),
)
cmdErr = inst.Run(command, &Streams{Stdout: wr, Stderr: os.Stderr})
wr.Close()
}()
// grab the controller tls pin from the bootstrap output
var cert controllerCert
dec := json.NewDecoder(rd)
for {
var msg bootstrapMsg
if err := dec.Decode(&msg); err == io.EOF {
break
} else if err != nil {
return fmt.Errorf("failed to parse bootstrap JSON output: %s", err)
}
c.log("bootstrap ===>", msg.Id, msg.State)
if msg.State == "error" {
c.log(msg.Error)
}
if msg.Id == "controller-cert" && msg.State == "done" {
json.Unmarshal(msg.Data, &cert)
}
}
if cmdErr != nil {
return cmdErr
}
if cert.Pin == "" {
return errors.New("could not determine controller cert from bootstrap output")
}
c.ControllerPin = cert.Pin
// grab the router IP from discoverd
disc := discoverd.NewClientWithURL(fmt.Sprintf("http://%s:1111", inst.IP))
leader, err := disc.Service("router-api").Leader()
if err != nil {
return fmt.Errorf("could not detect router ip: %s", err)
}
if err = setLocalDNS([]string{c.ClusterDomain, c.ControllerDomain()}, leader.Host()); err != nil {
return fmt.Errorf("could not set cluster DNS entries: %s", err)
}
c.RouterIP = leader.Host()
return nil
}
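// setLocalDNS adds or updates an /etc/hosts entry mapping ip to the given domains,
// so the test process can resolve the cluster and controller domains.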
func setLocalDNS(domains []string, ip string) error {
command := fmt.Sprintf(
`grep -q "^%[1]s" /etc/hosts && sed "s/^%[1]s.*/%[1]s %s/" -i /etc/hosts || echo %[1]s %s >> /etc/hosts`,
ip, strings.Join(domains, " "),
)
cmd := exec.Command("bash", "-c", command)
return cmd.Run()
}
func lookupUser(name string) (int, int, error) {
u, err := user.Lookup(name)
if err != nil {
return 0, 0, err
}
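// Uid and Gid are numeric strings on Linux, so Atoi conversion errors are ignored here.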
uid, _ := strconv.Atoi(u.Uid)
gid, _ := strconv.Atoi(u.Gid)
return uid, gid, nil
}
func (c *Cluster) DumpLogs(w io.Writer) {
tw := iotool.NewTimeoutWriter(w, 60*time.Second)
c.dumpLogs(tw)
tw.Finished()
}
func (c *Cluster) dumpLogs(w io.Writer) {
streams := &Streams{Stdout: w, Stderr: w}
run := func(inst *Instance, cmd string) error {
fmt.Fprint(w, "\n\n***** ***** ***** ***** ***** ***** ***** ***** ***** *****\n\n")
fmt.Fprintln(w, "HostID:", inst.ID, "-", cmd)
fmt.Fprintln(w)
err := inst.Run(cmd, streams)
fmt.Fprintln(w)
return err
}
fmt.Fprint(w, "\n\n***** ***** ***** DUMPING ALL LOGS ***** ***** *****\n\n")
for _, inst := range c.Instances {
run(inst, "ps faux")
run(inst, "cat /tmp/flynn-host.log")
run(inst, "cat /tmp/debug-info.log")
run(inst, "sudo cat /var/log/libvirt/libvirtd.log")
}
printLogs := func(instances []*Instance) {
fallback := func() {
fmt.Fprintf(w, "\n*** Error getting job logs via flynn-host, falling back to tail log dump\n\n")
for _, inst := range instances {
run(inst, "sudo bash -c 'tail -n +1 /var/log/flynn/**/*.log'")
}
}
run(instances[0], "flynn-host ps -a")
var out bytes.Buffer
if err := instances[0].Run("flynn-host ps -a -q", &Streams{Stdout: &out, Stderr: w}); err != nil {
io.Copy(w, &out)
fallback()
return
}
ids := strings.Split(strings.TrimSpace(out.String()), "\n")
for _, id := range ids {
if err := run(instances[0], fmt.Sprintf("flynn-host inspect %s", id)); err != nil {
fallback()
return
}
run(instances[0], fmt.Sprintf("flynn-host log --init %s", id))
}
}
printLogs(c.defaultInstances)
if len(c.releaseInstances) > 0 {
printLogs(c.releaseInstances)
}
} |
build, err := c.vm.NewInstance(&VMConfig{
Kernel: c.bc.Kernel, | random_line_split |
electron-oidc.ts | import queryString from 'querystring';
import fetch from 'node-fetch';
import nodeUrl from 'url';
import {BrowserWindowConstructorOptions, Event as ElectronEvent} from 'electron';
import crypto from 'crypto';
import Bluebird from 'bluebird';
import {IOidcConfig, ITokenObject} from '../src/contracts/index';
// eslint-disable-next-line @typescript-eslint/no-require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri, | nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
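// urlParts.hash looks like "#id_token=...&access_token=...&expires_in=..." (illustrative);
// the code below relies on id_token and access_token being the first two fragment parameters.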
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function logout(
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
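// e.g. (illustrative): a token with expiresIn = 3600 s is refreshed after 3600 * 0.75 * 1000 = 2,700,000 ms (45 minutes).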
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error, if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') {
throw error;
}
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) => authority === authorityUrl);
authoritiesToRefresh.splice(authorityToRemove, 1);
}
}
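// wait returns a cancellable Bluebird promise so stopSilentRefreshing can abort a pending
// refresh; this assumes cancellation is enabled elsewhere via Bluebird.config({cancellation: true}).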
function wait(ms: number): Promise<void> {
return new Bluebird.Promise((resolve: Function) => {
setTimeout(() => {
resolve();
}, ms);
});
}
function getRandomString(length: number): string {
const charset: string = '0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~';
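// Rejection sampling: only random bytes that fall inside the charset range are kept,
// which avoids the modulo bias of simply taking value % charset.length.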
let result: string = '';
while (length > 0) {
const randomValues: Buffer = crypto.randomBytes(length);
// eslint-disable-next-line no-loop-func
randomValues.forEach((value: number) => {
if (length === 0) {
return;
}
if (value < charset.length) {
result += charset[value];
length--;
}
});
}
return result;
} | response_type: config.responseType,
scope: config.scope,
state: getRandomString(16), | random_line_split |
electron-oidc.ts | import queryString from 'querystring';
import fetch from 'node-fetch';
import nodeUrl from 'url';
import {BrowserWindowConstructorOptions, Event as ElectronEvent} from 'electron';
import crypto from 'crypto';
import Bluebird from 'bluebird';
import {IOidcConfig, ITokenObject} from '../src/contracts/index';
// eslint-disable-next-line @typescript-eslint/no-require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function | (
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
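// e.g. (illustrative): a token with expiresIn = 3600 s is refreshed after 3600 * 0.75 * 1000 = 2,700,000 ms (45 minutes).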
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error, if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') {
throw error;
}
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) => authority === authorityUrl);
authoritiesToRefresh.splice(authorityToRemove, 1);
}
}
function wait(ms: number): Promise<void> {
return new Bluebird.Promise((resolve: Function) => {
setTimeout(() => {
resolve();
}, ms);
});
}
function getRandomString(length: number): string {
const charset: string = '0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~';
let result: string = '';
while (length > 0) {
const randomValues: Buffer = crypto.randomBytes(length);
// eslint-disable-next-line no-loop-func
randomValues.forEach((value: number) => {
if (length === 0) {
return;
}
if (value < charset.length) {
result += charset[value];
length--;
}
});
}
return result;
}
| logout | identifier_name |
electron-oidc.ts | import queryString from 'querystring';
import fetch from 'node-fetch';
import nodeUrl from 'url';
import {BrowserWindowConstructorOptions, Event as ElectronEvent} from 'electron';
import crypto from 'crypto';
import Bluebird from 'bluebird';
import {IOidcConfig, ITokenObject} from '../src/contracts/index';
// eslint-disable-next-line @typescript-eslint/no-require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function logout(
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> |
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) => authority === authorityUrl);
authoritiesToRefresh.splice(authorityToRemove, 1);
}
}
function wait(ms: number): Promise<void> {
return new Bluebird.Promise((resolve: Function) => {
setTimeout(() => {
resolve();
}, ms);
});
}
function getRandomString(length: number): string {
const charset: string = '0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~';
let result: string = '';
while (length > 0) {
const randomValues: Buffer = crypto.randomBytes(length);
// eslint-disable-next-line no-loop-func
randomValues.forEach((value: number) => {
if (length === 0) {
return;
}
if (value < charset.length) {
result += charset[value];
length--;
}
});
}
return result;
}
| {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error, if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') {
throw error;
}
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
} | identifier_body |
electron-oidc.ts | import queryString from 'querystring';
import fetch from 'node-fetch';
import nodeUrl from 'url';
import {BrowserWindowConstructorOptions, Event as ElectronEvent} from 'electron';
import crypto from 'crypto';
import Bluebird from 'bluebird';
import {IOidcConfig, ITokenObject} from '../src/contracts/index';
// eslint-disable-next-line @typescript-eslint/no-require-imports
import electron = require('electron');
const BrowserWindow = electron.BrowserWindow || electron.remote.BrowserWindow;
const authoritiesToRefresh: Array<string> = [];
const refreshTimeouts: Map<string, any> = new Map();
export default (
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): {
getTokenObject: (authorityUrl: string) => Promise<ITokenObject>;
logout: (tokenObject: ITokenObject, authorityUrl: string) => Promise<boolean>;
startSilentRefreshing: (authorityUrl: string, tokenObject: ITokenObject, refreshCallback: Function) => void;
} => {
function getTokenObjectForAuthorityUrl(authorityUrl): Promise<any> {
return getTokenObject(authorityUrl, config, windowParams);
}
function logoutViaTokenObjectAndAuthorityUrl(tokenObject: ITokenObject, authorityUrl: string): Promise<boolean> {
return logout(tokenObject, authorityUrl, config, windowParams);
}
function refreshTokenViaSilentRefresh(
authorityUrl: string,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
return startSilentRefreshing(authorityUrl, config, tokenObject, refreshCallback);
}
return {
getTokenObject: getTokenObjectForAuthorityUrl,
logout: logoutViaTokenObjectAndAuthorityUrl,
startSilentRefreshing: refreshTokenViaSilentRefresh,
};
};
function getTokenObject(
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<{
idToken: string;
accessToken: string;
}> {
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
return new Promise((resolve: Function, reject: Function): void => {
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow(windowParams || {useContentSize: true});
authWindow.loadURL(urlToLoad);
authWindow.show();
// Reject the Promise when the user closes the new window.
authWindow.on('closed', (): void => {
reject(new Error('window was closed by user'));
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
redirectCallback(url, authWindow, config, resolve, reject);
});
});
}
// Handle the different callbacks.
function redirectCallback(
url: string,
authWindow: electron.BrowserWindow,
config: IOidcConfig,
resolve: Function,
reject: Function,
): void {
// Parse callback url into its parts.
const urlParts = nodeUrl.parse(url, true);
const href = urlParts.href;
/**
* If there was an error:
* - Reject the promise with the error.
* - Close the window.
*
* If the href includes the callback uri:
* - Load that href in the window.
*
* If the href includes the specified redirect uri:
* - Parse the hash into its parts.
* - Add those parts to new object.
* - Resolve the promise with this object.
* - Close the window.
*/
if (href === null) {
reject(new Error(`Could not parse url: ${url}`));
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
} else if (href.includes('/connect/authorize/callback')) {
authWindow.loadURL(href);
} else if (href.includes(config.redirectUri)) {
const identityParameter = urlParts.hash;
const parameterAsArray = identityParameter.split('&');
if (parameterAsArray[0].includes('login_required')) {
reject(new Error('User is no longer logged in.'));
return;
}
const idToken = parameterAsArray[0].split('=')[1];
const accessToken = parameterAsArray[1].split('=')[1];
const expiresIn = parameterAsArray.find((parameter) => parameter.startsWith('expires_in=')).split('=')[1];
const tokenObject = {
idToken,
accessToken,
expiresIn,
};
resolve(tokenObject);
authWindow.removeAllListeners('closed');
setImmediate(() => {
authWindow.close();
});
}
}
function logout(
tokenObject: ITokenObject,
authorityUrl: string,
config: IOidcConfig,
windowParams: BrowserWindowConstructorOptions,
): Promise<boolean> {
const urlParams = {
id_token_hint: tokenObject.userId,
post_logout_redirect_uri: config.logoutRedirectUri,
};
const endSessionUrl = `${authorityUrl}connect/endsession?${queryString.stringify(urlParams)}`;
stopSilentRefreshing(authorityUrl);
return new Promise(
async (resolve: Function): Promise<void> => {
const response: fetch.Response = await fetch(endSessionUrl);
const logoutWindow = new BrowserWindow(windowParams || {useContentSize: true});
logoutWindow.webContents.on('will-navigate', (event, url) => {
if (url.includes(config.logoutRedirectUri)) {
event.preventDefault();
resolve(true);
logoutWindow.close();
}
});
logoutWindow.on('closed', () => {
resolve(true);
});
logoutWindow.loadURL(response.url);
logoutWindow.show();
},
);
}
async function silentRefresh(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): Promise<void> {
// Token refresh factor is set as described at https://github.com/manfredsteyer/angular-oauth2-oidc/blob/master/docs-src/silent-refresh.md#automatically-refreshing-a-token-when-before-it-expires-code-flow-and-implicit-flow
const tokenRefreshFactor = 0.75;
const secondsInMilisecondsFactor = 1000;
const tokenRefreshInterval = tokenObject.expiresIn * tokenRefreshFactor * secondsInMilisecondsFactor;
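// e.g. (illustrative): a token with expiresIn = 3600 s is refreshed after 3600 * 0.75 * 1000 = 2,700,000 ms (45 minutes).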
const timeout = wait(tokenRefreshInterval);
refreshTimeouts.set(authorityUrl, timeout);
await timeout;
if (!authoritiesToRefresh.includes(authorityUrl)) {
return;
}
// Build the Url Params from the Config.
const urlParams = {
client_id: config.clientId,
redirect_uri: config.redirectUri,
response_type: config.responseType,
scope: config.scope,
state: getRandomString(16),
nonce: getRandomString(16),
prompt: 'none',
};
const urlToLoad: string = `${authorityUrl}connect/authorize?${queryString.stringify(urlParams)}`;
// Open a new browser window and load the previously constructed url.
const authWindow = new BrowserWindow({show: false});
authWindow.loadURL(urlToLoad);
// Throw an error, if the user closes the new window.
authWindow.on('closed', (): void => {
throw new Error('window was closed by user');
});
/**
* This will trigger every time the new window is about to redirect.
* Important: Not AFTER it redirects but BEFORE.
* This gives us the possibility to intercept the redirect to
* the specified redirect uri, which would lead to faulty behaviour
* due to security aspects in chromium.
*
* If that redirect would start we stop it by preventing the default
* behaviour and instead parse its parameters in the
* "onCallback"-function.
*/
authWindow.webContents.on('will-redirect', (event: ElectronEvent, url: string): void => {
if (url.includes(config.redirectUri)) {
event.preventDefault();
}
const redirectCallbackResolved = (token: ITokenObject): void => {
refreshCallback(token);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
};
const redirectCallbackRejected = (error: Error): void => {
if (error.message !== 'User is no longer logged in.') |
stopSilentRefreshing(authorityUrl);
};
redirectCallback(url, authWindow, config, redirectCallbackResolved, redirectCallbackRejected);
});
}
function startSilentRefreshing(
authorityUrl: string,
config: IOidcConfig,
tokenObject: ITokenObject,
refreshCallback: Function,
): void {
authoritiesToRefresh.push(authorityUrl);
silentRefresh(authorityUrl, config, tokenObject, refreshCallback);
}
function stopSilentRefreshing(authorityUrl: string): void {
if (refreshTimeouts.has(authorityUrl)) {
refreshTimeouts.get(authorityUrl).cancel();
refreshTimeouts.delete(authorityUrl);
}
if (authoritiesToRefresh.includes(authorityUrl)) {
const authorityToRemove = authoritiesToRefresh.findIndex((authority) => authority === authorityUrl);
authoritiesToRefresh.splice(authorityToRemove, 1);
}
}
function wait(ms: number): Promise<void> {
return new Bluebird.Promise((resolve: Function) => {
setTimeout(() => {
resolve();
}, ms);
});
}
function getRandomString(length: number): string {
const charset: string = '0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~';
let result: string = '';
while (length > 0) {
const randomValues: Buffer = crypto.randomBytes(length);
// eslint-disable-next-line no-loop-func
randomValues.forEach((value: number) => {
if (length === 0) {
return;
}
if (value < charset.length) {
result += charset[value];
length--;
}
});
}
return result;
}
| {
throw error;
} | conditional_block |
clusterapi_utils.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
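// Example (illustrative) capacity annotations on a MachineSet that scales from zero:
//   capacity.cluster-autoscaler.kubernetes.io/cpu: "4"
//   capacity.cluster-autoscaler.kubernetes.io/memory: "16G"
//   capacity.cluster-autoscaler.kubernetes.io/gpu-count: "1"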
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the
// CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected
// by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
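// parseScalingBounds reads the min/max size annotations; a missing annotation is
// treated as 0, and max must not be smaller than min.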
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
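// e.g. (illustrative) "aws:///us-east-1a/i-0abc123" -> "i-0abc123".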
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
return resource.ParseQuantity(val)
}
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func | (annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
}
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// machines to be deleted. This function is needed because the user can change the default
// group name by using the CAPI_GROUP environment variable.
func getMachineDeleteAnnotationKey() string {
key := fmt.Sprintf("%s/delete-machine", getCAPIGroup())
return key
}
// getMachineAnnotationKey returns the key that is used by cluster-api for annotating
// nodes with their related machine objects. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getMachineAnnotationKey() string {
key := fmt.Sprintf("%s/machine", getCAPIGroup())
return key
}
// getClusterNameLabel returns the key that is used by cluster-api for labeling
// which cluster an object belongs to. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getClusterNameLabel() string {
key := fmt.Sprintf("%s/cluster-name", getCAPIGroup())
return key
}
| parseCPUCapacity | identifier_name |
clusterapi_utils.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the
// CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected
// by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" |
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func parseCPUCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
}
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// machines to be deleted. This function is needed because the user can change the default
// group name by using the CAPI_GROUP environment variable.
func getMachineDeleteAnnotationKey() string {
key := fmt.Sprintf("%s/delete-machine", getCAPIGroup())
return key
}
// getMachineAnnotationKey returns the key that is used by cluster-api for annotating
// nodes with their related machine objects. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getMachineAnnotationKey() string {
key := fmt.Sprintf("%s/machine", getCAPIGroup())
return key
}
// getClusterNameLabel returns the key that is used by cluster-api for labeling
// which cluster an object belongs to. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getClusterNameLabel() string {
key := fmt.Sprintf("%s/cluster-name", getCAPIGroup())
return key
}
| {
return resource.ParseQuantity(val)
} | conditional_block |
clusterapi_utils.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the | // by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
return resource.ParseQuantity(val)
}
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func parseCPUCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
}
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// machines to be deleted. This function is needed because the user can change the default
// group name by using the CAPI_GROUP environment variable.
func getMachineDeleteAnnotationKey() string {
key := fmt.Sprintf("%s/delete-machine", getCAPIGroup())
return key
}
// getMachineAnnotationKey returns the key that is used by cluster-api for annotating
// nodes with their related machine objects. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getMachineAnnotationKey() string {
key := fmt.Sprintf("%s/machine", getCAPIGroup())
return key
}
// getClusterNameLabel returns the key that is used by cluster-api for labeling
// which cluster an object belongs to. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getClusterNameLabel() string {
key := fmt.Sprintf("%s/cluster-name", getCAPIGroup())
return key
} | // CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected | random_line_split |
clusterapi_utils.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterapi
import (
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
const (
cpuKey = "capacity.cluster-autoscaler.kubernetes.io/cpu"
memoryKey = "capacity.cluster-autoscaler.kubernetes.io/memory"
diskCapacityKey = "capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk"
gpuTypeKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-type"
gpuCountKey = "capacity.cluster-autoscaler.kubernetes.io/gpu-count"
maxPodsKey = "capacity.cluster-autoscaler.kubernetes.io/maxPods"
taintsKey = "capacity.cluster-autoscaler.kubernetes.io/taints"
labelsKey = "capacity.cluster-autoscaler.kubernetes.io/labels"
)
var (
// clusterNameLabel is the label applied to objects (Machine, MachineSet, MachineDeployment)
// to identify which cluster they are owned by. Because the label can be
// affected by the CAPI_GROUP environment variable, it is initialized here.
clusterNameLabel = getClusterNameLabel()
// errMissingMinAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMinSizeAnnotationKey.
errMissingMinAnnotation = errors.New("missing min annotation")
// errMissingMaxAnnotation is the error returned when a
// machine set does not have an annotation keyed by
// nodeGroupMaxSizeAnnotationKey.
errMissingMaxAnnotation = errors.New("missing max annotation")
// errInvalidMinAnnotation is the error returned when a
// machine set has a non-integral min annotation value.
errInvalidMinAnnotation = errors.New("invalid min annotation")
// errInvalidMaxAnnotation is the error returned when a
// machine set has a non-integral max annotation value.
errInvalidMaxAnnotation = errors.New("invalid max annotation")
// machineDeleteAnnotationKey is the annotation used by cluster-api to indicate
// that a machine should be deleted. Because this key can be affected by the
// CAPI_GROUP env variable, it is initialized here.
machineDeleteAnnotationKey = getMachineDeleteAnnotationKey()
// machineAnnotationKey is the annotation used by the cluster-api on Node objects
// to specify the name of the related Machine object. Because this can be affected
// by the CAPI_GROUP env variable, it is initialized here.
machineAnnotationKey = getMachineAnnotationKey()
// nodeGroupMinSizeAnnotationKey and nodeGroupMaxSizeAnnotationKey are the keys
// used in MachineSet and MachineDeployment annotations to specify the limits
// for the node group. Because the keys can be affected by the CAPI_GROUP env
// variable, they are initialized here.
nodeGroupMinSizeAnnotationKey = getNodeGroupMinSizeAnnotationKey()
nodeGroupMaxSizeAnnotationKey = getNodeGroupMaxSizeAnnotationKey()
zeroQuantity = resource.MustParse("0")
)
type normalizedProviderID string
// minSize returns the minimum value encoded in the annotations keyed
// by nodeGroupMinSizeAnnotationKey. Returns errMissingMinAnnotation
// if the annotation doesn't exist or errInvalidMinAnnotation if the
// value is not of type int.
func minSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMinSizeAnnotationKey]
if !found {
return 0, errMissingMinAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMinAnnotation)
}
return i, nil
}
// maxSize returns the maximum value encoded in the annotations keyed
// by nodeGroupMaxSizeAnnotationKey. Returns errMissingMaxAnnotation
// if the annotation doesn't exist or errInvalidMaxAnnotation if the
// value is not of type int.
func maxSize(annotations map[string]string) (int, error) {
val, found := annotations[nodeGroupMaxSizeAnnotationKey]
if !found {
return 0, errMissingMaxAnnotation
}
i, err := strconv.Atoi(val)
if err != nil {
return 0, errors.Wrapf(err, "%s", errInvalidMaxAnnotation)
}
return i, nil
}
func parseScalingBounds(annotations map[string]string) (int, int, error) {
minSize, err := minSize(annotations)
if err != nil && err != errMissingMinAnnotation {
return 0, 0, err
}
if minSize < 0 {
return 0, 0, errInvalidMinAnnotation
}
maxSize, err := maxSize(annotations)
if err != nil && err != errMissingMaxAnnotation {
return 0, 0, err
}
if maxSize < 0 {
return 0, 0, errInvalidMaxAnnotation
}
if maxSize < minSize {
return 0, 0, errInvalidMaxAnnotation
}
return minSize, maxSize, nil
}
func getOwnerForKind(u *unstructured.Unstructured, kind string) *metav1.OwnerReference {
if u != nil {
for _, ref := range u.GetOwnerReferences() {
if ref.Kind == kind && ref.Name != "" {
return ref.DeepCopy()
}
}
}
return nil
}
func machineOwnerRef(machine *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machine, machineSetKind)
}
func machineSetOwnerRef(machineSet *unstructured.Unstructured) *metav1.OwnerReference {
return getOwnerForKind(machineSet, machineDeploymentKind)
}
func machineSetHasMachineDeploymentOwnerRef(machineSet *unstructured.Unstructured) bool {
return machineSetOwnerRef(machineSet) != nil
}
// normalizedProviderString splits s on '/' returning everything after
// the last '/'.
func normalizedProviderString(s string) normalizedProviderID {
split := strings.Split(s, "/")
return normalizedProviderID(split[len(split)-1])
}
func parseKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
return resource.ParseQuantity(val)
}
return zeroQuantity.DeepCopy(), nil
}
func parseIntKey(annotations map[string]string, key string) (resource.Quantity, error) {
if val, exists := annotations[key]; exists && val != "" {
valInt, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return zeroQuantity.DeepCopy(), fmt.Errorf("value %q from annotation %q expected to be an integer: %v", val, key, err)
}
return *resource.NewQuantity(valInt, resource.DecimalSI), nil
}
return zeroQuantity.DeepCopy(), nil
}
func parseCPUCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, cpuKey)
}
func parseMemoryCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, memoryKey)
}
func parseEphemeralDiskCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseKey(annotations, diskCapacityKey)
}
func parseGPUCount(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, gpuCountKey)
}
// The GPU type is not currently considered by the autoscaler when planning
// expansion, but most likely will be in the future. This method is being added
// in expectation of that arrival.
// see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/utils/gpu/gpu.go
func parseGPUType(annotations map[string]string) string {
if val, found := annotations[gpuTypeKey]; found {
return val
}
return ""
}
func parseMaxPodsCapacity(annotations map[string]string) (resource.Quantity, error) {
return parseIntKey(annotations, maxPodsKey)
}
func clusterNameFromResource(r *unstructured.Unstructured) string {
// Use Spec.ClusterName if defined (only available on v1alpha3+ types)
clusterName, found, err := unstructured.NestedString(r.Object, "spec", "clusterName")
if err != nil {
return ""
}
if found {
return clusterName
}
// Fallback to value of clusterNameLabel
if clusterName, ok := r.GetLabels()[clusterNameLabel]; ok {
return clusterName
}
return ""
}
// getNodeGroupMinSizeAnnotationKey returns the key that is used for the
// node group minimum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMinSizeAnnotationKey() string |
// getNodeGroupMaxSizeAnnotationKey returns the key that is used for the
// node group maximum size annotation. This function is needed because the user can
// change the default group name by using the CAPI_GROUP environment variable.
func getNodeGroupMaxSizeAnnotationKey() string {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-max-size", getCAPIGroup())
return key
}
// getMachineDeleteAnnotationKey returns the key that is used by cluster-api for marking
// machines to be deleted. This function is needed because the user can change the default
// group name by using the CAPI_GROUP environment variable.
func getMachineDeleteAnnotationKey() string {
key := fmt.Sprintf("%s/delete-machine", getCAPIGroup())
return key
}
// getMachineAnnotationKey returns the key that is used by cluster-api for annotating
// nodes with their related machine objects. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getMachineAnnotationKey() string {
key := fmt.Sprintf("%s/machine", getCAPIGroup())
return key
}
// getClusterNameLabel returns the key that is used by cluster-api for labeling
// which cluster an object belongs to. This function is needed because the user can change
// the default group name by using the CAPI_GROUP environment variable.
func getClusterNameLabel() string {
key := fmt.Sprintf("%s/cluster-name", getCAPIGroup())
return key
}
| {
key := fmt.Sprintf("%s/cluster-api-autoscaler-node-group-min-size", getCAPIGroup())
return key
} | identifier_body |
analysisTools.py | import numpy as np
import os
# importing local tools:
import plottingTools as pt
# read ATM SED file
def getSED(SEDfile):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) return wavelength in micron, the corresponding AB magnitudes (flux scaled by Dast**2 with Dast=10), emissivity and albedo
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# translate flux to AB mags
Dast = 10
magAB = getABmag(wavelength, flux*Dast**2)
return 1.0e6*wavelength, magAB, epsilon, albedo
# flux to AB mag
def getABmag(wave, Flambda):
# given wavelength and Flambda in SI units, return AB magnitudes
c = 2.998e8
Fnu = wave**2 / c * Flambda
return -2.5*np.log10(Fnu*1.0e26/3631)
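# --- illustrative sketch (added for clarity; the _demo name and values are
# hypothetical, not part of the original pipeline): getABmag converts
# F_lambda to F_nu = wave**2/c * F_lambda and then to an AB magnitude via
# -2.5*log10(F_nu / 3631 Jy). For F_lambda = 1e-12 W/m2/m at 1 micron,
# F_nu ~ 3.3e-33 W/m2/Hz ~ 3.3e-7 Jy, i.e. roughly 25.1 AB mag.
def _demoGetABmag():
    wave = 1.0e-6       # wavelength in metres (assumed test value)
    Flambda = 1.0e-12   # flux density in W/m2/m (assumed test value)
    return getABmag(wave, Flambda)  # ~25.1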
def getSEDwithNoise(dataDIR, ATMDIR, Dast, rAU, fName, tax):
# standard wavelengths and expected m5 depths
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
# get noiseless magnitudes for ATM model corrected with chosen Bus-DeMeo class
SEDfile = ATMDIR + fName + str(rAU) + '.dat'
magTrue = getATMBusSED(SEDfile, wavSPH, tax, Dast, rAU, dataDIR)
### add photometric noise
# since asteroids move, we need to shift m5 by 0.75 mag
# because we don't have 4 surveys as for static sources
m5SPH = m5static - 0.75 ## IMPORTANT !!!
# corresponding photometric errors
magErr = getPhotomErrors(magTrue, m5SPH)
## obs magnitudes: true mags with added photometric noise and var. offset
# draw from a Gaussian distribution
magObs = magTrue + np.random.normal(0, magErr)
return wavSPH, magTrue, magObs, magErr
def getPhotomErrors(mag, m5):
## generate photometric errors (use eq.5 from ls.st/lop with gamma=0.039)
rgamma = 0.039
xval = np.power(10, 0.4*(mag-m5))
# random photometric error
err_rand = np.sqrt((0.04-rgamma)*xval + rgamma*xval*xval)
# add systematic error for SPHEREx photometric extractions (in mag)
sysErr = 0.01 ## somewhat arbitrary, but realistic
return np.sqrt(err_rand**2 + sysErr**2)
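# --- hedged worked example (the _demo name and numbers are assumptions used
# only to illustrate the error model above): for a source 1 mag brighter than
# the 5-sigma depth, x = 10**(0.4*(mag-m5)) = 10**(-0.4) ~ 0.40, so
# err_rand = sqrt(0.001*0.40 + 0.039*0.40**2) ~ 0.08 mag, and the 0.01 mag
# systematic floor is then added in quadrature.
def _demoPhotomErrors():
    mag = np.array([19.0])  # assumed source magnitude
    m5 = np.array([20.0])   # assumed 5-sigma depth
    return getPhotomErrors(mag, m5)[0]  # ~0.08 mag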
# similar to getSPHERExSED, but already has noise-free static model
def getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, BrendanFile):
# Brendan's file with SPHEREx observations
mjdObs, waveObs, m5Obs = getBrendanSpectrum(dataDIR, BrendanFile)
# interpolate true mags to observed wavelengths
magTrueObs = np.interp(waveObs, wavSPH, magTrue)
## photometric errors
magErr = getPhotomErrors(magTrueObs, m5Obs)
# draw from a Gaussian distribution
dmNoise = np.random.normal(0, magErr)
## generate light curve offsets
dmOff = Amp*np.sin(2*np.pi*(mjdObs-mjd0)/Per)
# raw magnitudes: true mags with added variability and photometric noise
magRaw = magTrueObs + dmNoise + dmOff
return waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise
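# --- hedged note (added for clarity): the variability term above is a pure
# sinusoid, dmOff = Amp*sin(2*pi*(t - mjd0)/Per), so a simulated light curve
# with, say, Amp = 0.1 mag and Per = 0.2 d swings by +/-0.1 mag with a 4.8 h
# period; magRaw is the noiseless model plus this offset plus Gaussian noise.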
def getSPHERExSensitivity(dataDIR):
# read data from Olivier Dore's Point_Source_Sensitivity_v28_base_cbe.txt file
# wavelengthSPHEREx (in micron) is the first column
# m5SPHEREx (AB magnitude) is the second column
dataFile = dataDIR + 'Point_Source_Sensitivity_v28_base_cbe.txt'
SPHERExSensitivity = np.loadtxt(dataFile)
return SPHERExSensitivity.T[0], SPHERExSensitivity.T[1]
def getATMBusSED(SEDfile, waveSPH, BusTaxi, Dast, rAU, BusDIR):
# given ATM model, scale by Dast and interpolate to waveSPH
magTrue, eps, alb = getATMmodelMag(SEDfile, Dast, waveSPH)
# if requested, correct with Bus-DeMeo reflectivity curve
if (BusTaxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo) | return SPHmagAB, SPHeps, SPHalb
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
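# --- hedged illustration (the _demo name and toy arrays are assumptions):
# below wavMax the spectrum is treated as pure scattered light, so the
# correction is just the reflectivity ratio expressed in magnitudes; above
# wavMax the scattered component is extrapolated as a lambda**-2
# Rayleigh-Jeans tail and weighted by its fraction of the total flux, so the
# correction fades out where thermal emission dominates.
def _demoBusAKARIMagCorr():
    wave = np.array([0.75, 1.5, 3.0, 5.0])       # micron (toy grid)
    refl = np.array([1.0, 1.2, 1.3, 1.3])        # toy reflectivity curve
    magTot = np.array([20.0, 20.5, 19.0, 17.5])  # toy total AB magnitudes
    return getBusAKARIMagCorr(wave, refl, magTot, wavMax=2.0)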
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
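# --- hedged illustration (the _demo name and MJD values are assumptions):
# points separated by less than 0.05 d are grouped into one orbit, a gap of
# more than 0.05 d starts a new orbit, and a gap of more than 100 d starts a
# new season; the function returns, per season, the number of flux pairs in
# each orbit together with the first and last MJD of that orbit.
def _demoOrbitInfo():
    mjd = np.array([59000.00, 59000.01, 59000.02,   # orbit 1, season 1
                    59000.40, 59000.41,             # orbit 2, season 1
                    59180.00, 59180.01])            # orbit 1, season 2
    return getOrbitInfoFromBrendanSpectrum(mjd, mjd)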
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(dMJD)
print('season', i, ' Norb:', Norbits, ' Nobs=', Nobs)
print(' NoPairs=', NoPairs[i])
print(' dt=', dt)
print('No. of observations:', Nobs)
return Nobs
## select observations from a single season (zero-indexed!)
def selectSeasonSED(season, waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise):
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nseasons = len(MJDmin)
if (season >= Nseasons):
print('there are only', Nseasons,' seasons, not', season)
return
Norbits=len(MJDmin[season])
mjdMinVal = MJDmin[season][0]
mjdMaxVal = MJDmax[season][Norbits-1]
wS = waveObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mjdS = mjdObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mRawS = magRaw[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mErrS = magErr[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmOffS = dmOff[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmNoiseS = dmNoise[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
return wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS
# given wavelengths from Brendan's file, return lambdaGrid, which holds the
# closest values in the standard SPHEREx wavelength grid
def getStandardLambda(waveBrandon, waveSPHEREx):
lambdaGrid = 0*waveBrandon
for i in range(0,len(waveBrandon)):
delLambda = np.abs(waveSPHEREx - waveBrandon[i])
lambdaGrid[i] = getClosest(delLambda, waveSPHEREx)[0][1]
return lambdaGrid
def getClosest(list1, list2):
zipped_pairs = zip(list1, list2)
return sorted(zipped_pairs)
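# --- hedged illustration (the _demo name and toy grid are assumptions):
# getStandardLambda snaps each observed wavelength onto the nearest entry of
# the standard grid by sorting |grid - lambda_obs| with getClosest and
# keeping the best match, e.g. 0.80 -> 0.75 and 1.26 -> 1.25 below.
def _demoStandardLambda():
    toyGrid = np.array([0.75, 1.00, 1.25, 1.50])  # toy standard grid (micron)
    observed = np.array([0.80, 1.26])             # toy observed wavelengths
    return getStandardLambda(observed, toyGrid)   # -> array([0.75, 1.25])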
def dumpSPHERExSED(MJD, wavelength, mag, magErr, dmVarOff, randNoise, filename):
np.savetxt(filename, (MJD, wavelength, mag, magErr, dmVarOff, randNoise))
return
def simSPHERExSpec(Dast, rAU, SEDfile, dataDIR, BusTaxi, LC, obsFile, ABrange='', outfilerootname=''):
## set defaults
if (ABrange==''):
ABrange = [15.0, 20.0]
if (outfilerootname==''):
outfilerootname = './simSPHERExSpecDefault'
ABmin, ABmax = ABrange
destfiles = []
## SPHEREx standard wavelengths and expected m5 depths for static sources
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
## noise-free SED computed by ATM and corrected for the Bus emissivity
magTrue = getATMBusSED(SEDfile, wavSPH, BusTaxi[0], Dast, rAU, dataDIR)
## generate light-curve offsets and noise, and produce "raw" SPHEREx spectrum
# light curve parameters
mjd0 = LC['mjd0'] # arbitrary mjd for phase=0
Amp = LC['Ampl'] # light-curve amplitude (mag)
Per = LC['Period'] # period in days
sysErr = LC['sysErr'] # additional systematic photometric error for SPHEREx moving objects
# and now add photometric noise and variability offsets
wavObs, mjdObs, magRaw, magErr, dmOff, dmN = getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, obsFile)
## now analyze and plot all seasons separately
# first get seasonal and orbital information
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nobs = getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=0)
for season in range(0, len(MJDmin)):
print('Season', season)
wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS = selectSeasonSED(season, wavObs, mjdObs, magRaw, magErr, dmOff, dmN)
pt.plotSEDerrors(wS, mRawS, mErrS, ABmin, ABmax, True, wavSPH, magTrue)
## save simulated raw SPHEREx SEDs
# first data
outfile = outfilerootname + '_rawSED_Season' + str(season) + '.dat'
dumpSPHERExSED(mjdS, wS, mRawS, mErrS, dmOffS, dmNoiseS, outfile)
# and then plots
destfile = outfilerootname + '_rawSED_Season' + str(season) + '.png'
cmdstr = 'mv oneSEDerrors.png ' + destfile
destfiles.append(destfile)
os.system(cmdstr)
print('produced plots (shown above):')
for i in range(0,len(destfiles)):
print(' ', destfiles[i])
print(' and corresponding data files that have extension dat instead of png')
print(' ')
print(' Each data file lists, for a single SPHEREx season, the following quantities:')
print(' MJD wavelength magSPHEREx magUncertainty varOffset randomNoise')
print(' the last two entries are added for convenience:')
print(' the input noiseless model can be obtained as magTrue = mag - varOffset - randomNoise')
return | random_line_split |
|
analysisTools.py | import numpy as np
import os
# importing local tools:
import plottingTools as pt
# read ATM SED file
def getSED(SEDfile):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) return wavelength in micron, the corresponding AB magnitudes (flux scaled by Dast**2 with Dast=10), emissivity and albedo
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# translate flux to AB mags
Dast = 10
magAB = getABmag(wavelength, flux*Dast**2)
return 1.0e6*wavelength, magAB, epsilon, albedo
# flux to AB mag
def getABmag(wave, Flambda):
# given wavelength and Flambda in SI units, return AB magnitudes
c = 2.998e8
Fnu = wave**2 / c * Flambda
return -2.5*np.log10(Fnu*1.0e26/3631)
def getSEDwithNoise(dataDIR, ATMDIR, Dast, rAU, fName, tax):
# standard wavelengths and expected m5 depths
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
# get noiseless magnitudes for ATM model corrected with chosen Bus-DeMeo class
SEDfile = ATMDIR + fName + str(rAU) + '.dat'
magTrue = getATMBusSED(SEDfile, wavSPH, tax, Dast, rAU, dataDIR)
### add photometric noise
# since asteroids move, we need to shift m5 by 0.75 mag
# because we don't have 4 surveys as for static sources
m5SPH = m5static - 0.75 ## IMPORTANT !!!
# corresponding photometric errors
magErr = getPhotomErrors(magTrue, m5SPH)
## obs magnitudes: true mags with added photometric noise and var. offset
# draw from a Gaussian distribution
magObs = magTrue + np.random.normal(0, magErr)
return wavSPH, magTrue, magObs, magErr
def getPhotomErrors(mag, m5):
## generate photometric errors (use eq.5 from ls.st/lop with gamma=0.039)
rgamma = 0.039
xval = np.power(10, 0.4*(mag-m5))
# random photometric error
err_rand = np.sqrt((0.04-rgamma)*xval + rgamma*xval*xval)
# add systematic error for SPHEREx photometric extractions (in mag)
sysErr = 0.01 ## somewhat arbitrary, but realistic
return np.sqrt(err_rand**2 + sysErr**2)
# similar to getSPHERExSED, but already has noise-free static model
def getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, BrendanFile):
# Brendan's file with SPHEREx observations
mjdObs, waveObs, m5Obs = getBrendanSpectrum(dataDIR, BrendanFile)
# interpolate true mags to observed wavelengths
magTrueObs = np.interp(waveObs, wavSPH, magTrue)
## photometric errors
magErr = getPhotomErrors(magTrueObs, m5Obs)
# draw from a Gaussian distribution
dmNoise = np.random.normal(0, magErr)
## generate light curve offsets
dmOff = Amp*np.sin(2*np.pi*(mjdObs-mjd0)/Per)
# raw magnitudes: true mags with added variability and photometric noise
magRaw = magTrueObs + dmNoise + dmOff
return waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise
def getSPHERExSensitivity(dataDIR):
# read data from Olivier Dore's Point_Source_Sensitivity_v28_base_cbe.txt file
# wavelengthSPHEREx (in micron) is the first column
# m5SPHEREx (AB magnitude) is the second column
dataFile = dataDIR + 'Point_Source_Sensitivity_v28_base_cbe.txt'
SPHERExSensitivity = np.loadtxt(dataFile)
return SPHERExSensitivity.T[0], SPHERExSensitivity.T[1]
def getATMBusSED(SEDfile, waveSPH, BusTaxi, Dast, rAU, BusDIR):
# given ATM model, scale by Dast and interpolate to waveSPH
magTrue, eps, alb = getATMmodelMag(SEDfile, Dast, waveSPH)
# if requested, correct with Bus-DeMeo reflectivity curve
if (BusTaxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo)
return SPHmagAB, SPHeps, SPHalb
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
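# --- hedged note (added for clarity): the single-survey adjustment above,
# m5 -= 1.25*log10(4) ~ 0.75 mag, matches the comment because losing a
# factor of 4 in effective coverage raises the 5-sigma flux limit by
# sqrt(4) = 2, and 2.5*log10(2) = 1.25*log10(4) ~ 0.753 mag.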
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(dMJD)
print('season', i, ' Norb:', Norbits, ' Nobs=', Nobs)
print(' NoPairs=', NoPairs[i])
print(' dt=', dt)
print('No. of observations:', Nobs)
return Nobs
## select observations from a single season (zero-indexed!)
def selectSeasonSED(season, waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise):
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nseasons = len(MJDmin)
if (season >= Nseasons):
print('there are only', Nseasons,' seasons, not', season)
return
Norbits=len(MJDmin[season])
mjdMinVal = MJDmin[season][0]
mjdMaxVal = MJDmax[season][Norbits-1]
wS = waveObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mjdS = mjdObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mRawS = magRaw[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mErrS = magErr[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmOffS = dmOff[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmNoiseS = dmNoise[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
return wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS
# given wavelengths from Brendan's file, return lambdaGrid, which holds the
# closest values in the standard SPHEREx wavelength grid
def getStandardLambda(waveBrandon, waveSPHEREx):
lambdaGrid = 0*waveBrandon
for i in range(0,len(waveBrandon)):
delLambda = np.abs(waveSPHEREx - waveBrandon[i])
lambdaGrid[i] = getClosest(delLambda, waveSPHEREx)[0][1]
return lambdaGrid
def getClosest(list1, list2):
zipped_pairs = zip(list1, list2)
return sorted(zipped_pairs)
def dumpSPHERExSED(MJD, wavelength, mag, magErr, dmVarOff, randNoise, filename):
np.savetxt(filename, (MJD, wavelength, mag, magErr, dmVarOff, randNoise))
return
def simSPHERExSpec(Dast, rAU, SEDfile, dataDIR, BusTaxi, LC, obsFile, ABrange='', outfilerootname=''):
## set defaults
if (ABrange==''):
ABrange = [15.0, 20.0]
if (outfilerootname==''):
|
ABmin, ABmax = ABrange
destfiles = []
## SPHEREx standard wavelengths and expected m5 depths for static sources
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
## noise-free SED computed by ATM and corrected for the Bus emissivity
magTrue = getATMBusSED(SEDfile, wavSPH, BusTaxi[0], Dast, rAU, dataDIR)
## generate light-curve offsets and noise, and produce "raw" SPHEREx spectrum
# light curve parameters
mjd0 = LC['mjd0'] # arbitrary mjd for phase=0
Amp = LC['Ampl'] # light-curve amplitude (mag)
Per = LC['Period'] # period in days
sysErr = LC['sysErr'] # additional systematic photometric error for SPHEREx moving objects
# and now add photometric noise and variability offsets
wavObs, mjdObs, magRaw, magErr, dmOff, dmN = getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, obsFile)
## now analyze and plot all seasons separately
# first get seasonal and orbital information
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nobs = getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=0)
for season in range(0, len(MJDmin)):
print('Season', season)
wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS = selectSeasonSED(season, wavObs, mjdObs, magRaw, magErr, dmOff, dmN)
pt.plotSEDerrors(wS, mRawS, mErrS, ABmin, ABmax, True, wavSPH, magTrue)
## save simulated raw SPHEREx SEDs
# first data
outfile = outfilerootname + '_rawSED_Season' + str(season) + '.dat'
dumpSPHERExSED(mjdS, wS, mRawS, mErrS, dmOffS, dmNoiseS, outfile)
# and then plots
destfile = outfilerootname + '_rawSED_Season' + str(season) + '.png'
cmdstr = 'mv oneSEDerrors.png ' + destfile
destfiles.append(destfile)
os.system(cmdstr)
print('produced plots (shown above):')
for i in range(0,len(destfiles)):
print(' ', destfiles[i])
print(' and corresponding data files that have extension dat instead of png')
print(' ')
print(' Each data file lists, for a single SPHEREx season, the following quantities:')
print(' MJD wavelength magSPHEREx magUncertainty varOffset randomNoise')
print(' the last two entries are added for convenience:')
print(' the input noiseless model can be obtained as magTrue = mag - varOffset - randomNoise')
return
| outfilerootname = './simSPHERExSpecDefault' | conditional_block |
analysisTools.py | import numpy as np
import os
# importing local tools:
import plottingTools as pt
# read ATM SED file
def getSED(SEDfile):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) return wavelength in micron, the corresponding AB magnitudes (flux scaled by Dast**2 with Dast=10), emissivity and albedo
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# translate flux to AB mags
Dast = 10
magAB = getABmag(wavelength, flux*Dast**2)
return 1.0e6*wavelength, magAB, epsilon, albedo
# flux to AB mag
def getABmag(wave, Flambda):
# given wavelength and Flambda in SI units, return AB magnitudes
c = 2.998e8
Fnu = wave**2 / c * Flambda
return -2.5*np.log10(Fnu*1.0e26/3631)
def getSEDwithNoise(dataDIR, ATMDIR, Dast, rAU, fName, tax):
# standard wavelengths and expected m5 depths
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
# get noiseless magnitudes for ATM model corrected with chosen Bus-DeMeo class
SEDfile = ATMDIR + fName + str(rAU) + '.dat'
magTrue = getATMBusSED(SEDfile, wavSPH, tax, Dast, rAU, dataDIR)
### add photometric noise
# since asteroids move, we need to shift m5 by 0.75 mag
# because we don't have 4 surveys as for static sources
m5SPH = m5static - 0.75 ## IMPORTANT !!!
# corresponding photometric errors
magErr = getPhotomErrors(magTrue, m5SPH)
## obs magnitudes: true mags with added photometric noise and var. offset
# draw from a Gaussian distribution
magObs = magTrue + np.random.normal(0, magErr)
return wavSPH, magTrue, magObs, magErr
def getPhotomErrors(mag, m5):
## generate photometric errors (use eq.5 from ls.st/lop with gamma=0.039)
rgamma = 0.039
xval = np.power(10, 0.4*(mag-m5))
# random photometric error
err_rand = np.sqrt((0.04-rgamma)*xval + rgamma*xval*xval)
# add systematic error for SPHEREx photometric extractions (in mag)
sysErr = 0.01 ## somewhat arbitrary, but realistic
return np.sqrt(err_rand**2 + sysErr**2)
# similar to getSPHERExSED, but already has noise-free static model
def getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, BrendanFile):
# Brendan's file with SPHEREx observations
mjdObs, waveObs, m5Obs = getBrendanSpectrum(dataDIR, BrendanFile)
# interpolate true mags to observed wavelengths
magTrueObs = np.interp(waveObs, wavSPH, magTrue)
## photometric errors
magErr = getPhotomErrors(magTrueObs, m5Obs)
# draw from a Gaussian distribution
dmNoise = np.random.normal(0, magErr)
## generate light curve offsets
dmOff = Amp*np.sin(2*np.pi*(mjdObs-mjd0)/Per)
# raw magnitudes: true mags with added variability and photometric noise
magRaw = magTrueObs + dmNoise + dmOff
return waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise
def getSPHERExSensitivity(dataDIR):
# read data from Olivier Dore's Point_Source_Sensitivity_v28_base_cbe.txt file
# wavelengthSPHEREx (in micron) is the first column
# m5SPHEREx (AB magnitude) is the second column
dataFile = dataDIR + 'Point_Source_Sensitivity_v28_base_cbe.txt'
SPHERExSensitivity = np.loadtxt(dataFile)
return SPHERExSensitivity.T[0], SPHERExSensitivity.T[1]
def getATMBusSED(SEDfile, waveSPH, BusTaxi, Dast, rAU, BusDIR):
# given ATM model, scale by Dast and interpolate to waveSPH
magTrue, eps, alb = getATMmodelMag(SEDfile, Dast, waveSPH)
# if requested, correct with Bus-DeMeo reflectivity curve
if (BusTaxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
|
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
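        # (note: 1.25*log10(4) = 2.5*log10(2) ~ 0.75 mag, i.e. the noise is larger by a factor of 2)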
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(dMJD)
print('season', i, ' Norb:', Norbits, ' Nobs=', Nobs)
print(' NoPairs=', NoPairs[i])
print(' dt=', dt)
print('No. of observations:', Nobs)
return Nobs
## select observations from a single season (zero-indexed!)
def selectSeasonSED(season, waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise):
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nseasons = len(MJDmin)
    if (season >= Nseasons):
print('there are only', Nseasons,' seasons, not', season)
return
Norbits=len(MJDmin[season])
mjdMinVal = MJDmin[season][0]
mjdMaxVal = MJDmax[season][Norbits-1]
wS = waveObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mjdS = mjdObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mRawS = magRaw[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mErrS = magErr[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmOffS = dmOff[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmNoiseS = dmNoise[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
return wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS
# given lambda from Brendan's file, return lambdaGrid which are
# the closest values in the standard SPHEREx wavelength grid
def getStandardLambda(waveBrandon, waveSPHEREx):
lambdaGrid = 0*waveBrandon
for i in range(0,len(waveBrandon)):
delLambda = np.abs(waveSPHEREx - waveBrandon[i])
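        # getClosest sorts by offset, so [0][1] is the SPHEREx grid wavelength nearest to this observed wavelength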
lambdaGrid[i] = getClosest(delLambda, waveSPHEREx)[0][1]
return lambdaGrid
def getClosest(list1, list2):
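    # pair each list1 value with its list2 counterpart; sorting puts the smallest list1 value first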
zipped_pairs = zip(list1, list2)
return sorted(zipped_pairs)
def dumpSPHERExSED(MJD, wavelength, mag, magErr, dmVarOff, randNoise, filename):
np.savetxt(filename, (MJD, wavelength, mag, magErr, dmVarOff, randNoise))
return
def simSPHERExSpec(Dast, rAU, SEDfile, dataDIR, BusTaxi, LC, obsFile, ABrange='', outfilerootname=''):
## set defaults
if (ABrange==''):
ABrange = [15.0, 20.0]
if (outfilerootname==''):
outfilerootname = './simSPHERExSpecDefault'
ABmin, ABmax = ABrange
destfiles = []
## SPHEREx standard wavelengths and expected m5 depths for static sources
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
## noise-free SED computed by ATM and corrected for the Bus emissivity
magTrue = getATMBusSED(SEDfile, wavSPH, BusTaxi[0], Dast, rAU, dataDIR)
## generate light-curve offsets and noise, and produce "raw" SPHEREx spectrum
# light curve parameters
mjd0 = LC['mjd0'] # arbitrary mjd for phase=0
Amp = LC['Ampl'] # sysErr = LC[3]
Per = LC['Period'] # period in days
sysErr = LC['sysErr'] # additional systematic photometric error for SPHEREx moving objects
# and now add photometric noise and variability offsets
wavObs, mjdObs, magRaw, magErr, dmOff, dmN = getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, obsFile)
## now analyze and plot all seasons separately
# first get seasonal and orbital information
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nobs = getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=0)
for season in range(0, len(MJDmin)):
print('Season', season)
wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS = selectSeasonSED(season, wavObs, mjdObs, magRaw, magErr, dmOff, dmN)
pt.plotSEDerrors(wS, mRawS, mErrS, ABmin, ABmax, True, wavSPH, magTrue)
## save simulated raw SPHEREx SEDs
# first data
outfile = outfilerootname + '_rawSED_Season' + str(season) + '.dat'
dumpSPHERExSED(mjdS, wS, mRawS, mErrS, dmOffS, dmNoiseS, outfile)
# and then plots
destfile = outfilerootname + '_rawSED_Season' + str(season) + '.png'
cmdstr = 'mv oneSEDerrors.png ' + destfile
destfiles.append(destfile)
os.system(cmdstr)
print('produced plots (shown above):')
for i in range(0,len(destfiles)):
print(' ', destfiles[i])
print(' and corresponding data files that have extension dat instead of png')
print(' ')
print(' Each data file lists, for a single SPHEREx season, the following quantities:')
print(' MJD wavelength magSPHEREx magUncertainty varOffset randomNoise')
print(' the last two entries are added for convenience:')
print(' the input noiseless model can be obtained as magTrue = mag - varOffset - randomNoise')
return
| wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo)
return SPHmagAB, SPHeps, SPHalb | identifier_body |
analysisTools.py | import numpy as np
import os
# importing local tools:
import plottingTools as pt
# read ATM SED file
def getSED(SEDfile):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) return wavelength in micron, flux (F_lambda in W/m2/micron), emissivity and albedo
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# translate flux to AB mags
Dast = 10
magAB = getABmag(wavelength, flux*Dast**2)
return 1.0e6*wavelength, magAB, epsilon, albedo
# flux to AB mag
def getABmag(wave, Flambda):
# given wavelength and Flambda in SI units, return AB magnitudes
c = 2.998e8
Fnu = wave**2 / c * Flambda
return -2.5*np.log10(Fnu*1.0e26/3631)
def getSEDwithNoise(dataDIR, ATMDIR, Dast, rAU, fName, tax):
# standard wavelengths and expected m5 depths
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
# get noiseless magnitudes for ATM model corrected with chosen Bus-DeMeo class
SEDfile = ATMDIR + fName + str(rAU) + '.dat'
magTrue = getATMBusSED(SEDfile, wavSPH, tax, Dast, rAU, dataDIR)
### add photometric noise
# since asteroids move, we need to shift m5 by 0.75 mag
# because we don't have 4 surveys as for static sources
m5SPH = m5static - 0.75 ## IMPORTANT !!!
# corresponding photometric errors
magErr = getPhotomErrors(magTrue, m5SPH)
## obs magnitudes: true mags with added photometric noise and var. offset
# draw from a Gaussian distribution
magObs = magTrue + np.random.normal(0, magErr)
return wavSPH, magTrue, magObs, magErr
def getPhotomErrors(mag, m5):
## generate photometric errors (use eq.5 from ls.st/lop with gamma=0.039)
rgamma = 0.039
xval = np.power(10, 0.4*(mag-m5))
# random photometric error
err_rand = np.sqrt((0.04-rgamma)*xval + rgamma*xval*xval)
# add systematic error for SPHEREx photometric extractions (in mag)
sysErr = 0.01 ## somewhat arbitrary, but realistic
return np.sqrt(err_rand**2 + sysErr**2)
# similar to getSPHERExSED, but already has noise-free static model
def getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, BrendanFile):
# Brendan's file with SPHEREx observations
mjdObs, waveObs, m5Obs = getBrendanSpectrum(dataDIR, BrendanFile)
# interpolate true mags to observed wavelengths
magTrueObs = np.interp(waveObs, wavSPH, magTrue)
## photometric errors
magErr = getPhotomErrors(magTrueObs, m5Obs)
# draw from a Gaussian distribution
dmNoise = np.random.normal(0, magErr)
## generate light curve offsets
dmOff = Amp*np.sin(2*np.pi*(mjdObs-mjd0)/Per)
# raw magnitudes: true mags with added variability and photometric noise
magRaw = magTrueObs + dmNoise + dmOff
return waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise
def getSPHERExSensitivity(dataDIR):
# read data from Olivier Dore's Point_Source_Sensitivity_v28_base_cbe.txt file
# wavelengthSPHEREx (in micron) is the first column
# m5SPHEREx (AB magnitude) is the second column
dataFile = dataDIR + 'Point_Source_Sensitivity_v28_base_cbe.txt'
SPHERExSensitivity = np.loadtxt(dataFile)
return SPHERExSensitivity.T[0], SPHERExSensitivity.T[1]
def getATMBusSED(SEDfile, waveSPH, BusTaxi, Dast, rAU, BusDIR):
# given ATM model, scale by Dast and interpolate to waveSPH
magTrue, eps, alb = getATMmodelMag(SEDfile, Dast, waveSPH)
# if requested, correct with Bus-DeMeo reflectivity curve
if (BusTaxi!=''):
# read reflectivity curve
file = BusDIR + "/" + "reflectivity" + BusTaxi + ".dat"
refldata = np.loadtxt(file, skiprows=1)
waveSPHrefl, reflectivity = refldata.T[0], refldata.T[1]
print('read in', file)
if (waveSPHrefl.size != waveSPH.size):
print('ERROR: different standard SPHEREx wavelength grids!')
# assumption is that emission is negligible below this wavelength
wavMinEm = 2.0 # micron, OK outside Earth's orbit
# compute and apply correction
magTrue += getBusAKARIMagCorr(waveSPH, reflectivity, magTrue, wavMinEm)
return magTrue
# no-noise version
def getATMmodelMag(SEDfile, Dast, waveSPH):
# 1) read wavelength (in m), flux (F_lambda in W/m2/m), emissivity and albedo
# 2) correct flux from the fiducial D=1km to Dast
# 3) given input wavelength array, compute AB magnitudes
# 4) return true AB magnitudes, epsilon, and albedo interpolated to waveSPH values
# 1) read data
wavelength, flux, epsilon, albedo = np.loadtxt(SEDfile)
# 2) correct for Dast and translate flux to AB mags
magAB = getABmag(wavelength, flux*Dast**2)
# 3) interpolate magAB, epsilon and albedo to waveSPH
SPHmagAB = np.interp(waveSPH, 1.0e6*wavelength, magAB)
SPHeps = np.interp(waveSPH, 1.0e6*wavelength, epsilon)
SPHalb = np.interp(waveSPH, 1.0e6*wavelength, albedo)
return SPHmagAB, SPHeps, SPHalb
def getBusAKARIMagCorr(wave, refl, magTot, wavMax):
## compute additive correction to magTot because of a different
## reflectivity curve affecting the scattered flux; the correction
## vanishes at the first wavelength in the SPHEREx standard grid
refl0 = refl/refl[0]
# part 1: emission negligible at short wavelengths
dmag1 = -2.5*np.log10(refl0)
# part 2: extrapolate the scattered component from short wavelengths
# compute fraction of the total flux due to scattered component
magTotATwavMax = np.interp(wavMax, wave, magTot)
ftotATwavMax = 10**(0.4*(magTot-magTotATwavMax))
# and extrapolate as Rayleigh-Jeans tail
fCorr = 1 - (1-refl0) * ftotATwavMax * (wavMax/wave)**2
dmag2 = -2.5*np.log10(fCorr)
return np.where(wave < wavMax, dmag1, dmag2)
# read MJD and wavelength from Brendan's file, and regrid wavelengths
# to the standard wavelength; return MJD and corresponding standard
# wavelength and 5-sigma SPHEREx depth
def getBrendanSpectrum(dataDIR, dataFile, singleSurvey=True):
b = np.loadtxt(dataFile, skiprows=1)
mjdBrendan = b.T[0]
wavBrendan = b.T[1]
waveSPH, m5SPH = getSPHERExSensitivity(dataDIR)
wavBrendanSPH = getStandardLambda(wavBrendan, waveSPH)
m5BrendanSPH = np.interp(wavBrendanSPH, waveSPH, m5SPH)
if (singleSurvey):
# since asteroids move, we need to increase errors by sqrt(4)
# because we don't have 4 surveys as for static sources, or
# the 5-sigma limiting depth is shallower by ~0.75 mag
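        # (note: 1.25*log10(4) = 2.5*log10(2) ~ 0.75 mag, i.e. the noise is larger by a factor of 2)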
m5BrendanSPH -= 1.25*np.log10(4)
return mjdBrendan, wavBrendanSPH, m5BrendanSPH
# wrapper around getOrbitInfoFromBrendanSpectrum since wav is not needed
def getOrbitInfoFromBrendansMJDs(mjd):
return getOrbitInfoFromBrendanSpectrum(mjd, mjd)
# given BrendanSpectrum (mjd, wavelength), for each SPHEREx season/survey
# (separated by >100d), find for all its orbits (<0.05d) how many pairs of
# fluxes per orbit; return as (j=0..Nseason; k=0..Norbits)
# NoPairs[j,k], MJDmin[j,k], MJDmax[j,k]
def getOrbitInfoFromBrendanSpectrum(mjd,wav):
Norbits = []
NoPairs = []
MJDmin = []
MJDmax = []
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[0])
k = 1
Nobs = 0
for i in range(0,len(mjd)):
Nobs += 1
dt = mjd[i] - mjd[i-1]
if (dt>0.05):
# new orbit...
Mmax.append(mjd[i-1])
nps.append(int(k/2))
k = 1
if (dt>100):
# and also a new season
MJDmin.append(Mmin)
MJDmax.append(Mmax)
NoPairs.append(nps)
Mmin = []
Mmax = []
nps = []
Mmin.append(mjd[i])
else:
# not new orbit, simply count the point
k += 1
if (i == (len(mjd)-1)):
# special case of the last point
Mmax.append(mjd[i])
MJDmin.append(Mmin)
MJDmax.append(Mmax)
nps.append(int(k/2))
NoPairs.append(nps)
return NoPairs, MJDmin, MJDmax
def getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=False):
Nseasons = len(MJDmin)
Nobs = 0
for i in range(0, Nseasons):
Norbits=len(MJDmin[i])
Nobs += 2*np.sum(NoPairs[i])
dt = []
if verbose:
for j in range(0,len(NoPairs[i])):
dMJD = int(60*24*(MJDmax[i][j] - MJDmin[i][j]))
dt.append(dMJD)
print('season', i, ' Norb:', Norbits, ' Nobs=', Nobs)
print(' NoPairs=', NoPairs[i])
print(' dt=', dt)
print('No. of observations:', Nobs)
return Nobs
## select observations from a single season (zero-indexed!)
def selectSeasonSED(season, waveObs, mjdObs, magRaw, magErr, dmOff, dmNoise):
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nseasons = len(MJDmin)
    if (season >= Nseasons):
print('there are only', Nseasons,' seasons, not', season)
return
Norbits=len(MJDmin[season])
mjdMinVal = MJDmin[season][0]
mjdMaxVal = MJDmax[season][Norbits-1]
wS = waveObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mjdS = mjdObs[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mRawS = magRaw[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
mErrS = magErr[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmOffS = dmOff[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
dmNoiseS = dmNoise[(mjdObs>=mjdMinVal)&(mjdObs<=mjdMaxVal)]
return wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS
# given lambda from Brendan's file, return lambdaGrid which are
# the closest values in the standard SPHEREx wavelength grid
def getStandardLambda(waveBrandon, waveSPHEREx):
lambdaGrid = 0*waveBrandon
for i in range(0,len(waveBrandon)):
delLambda = np.abs(waveSPHEREx - waveBrandon[i])
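        # getClosest sorts by offset, so [0][1] is the SPHEREx grid wavelength nearest to this observed wavelength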
lambdaGrid[i] = getClosest(delLambda, waveSPHEREx)[0][1]
return lambdaGrid
def | (list1, list2):
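    # pair each list1 value with its list2 counterpart; sorting puts the smallest list1 value first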
zipped_pairs = zip(list1, list2)
return sorted(zipped_pairs)
def dumpSPHERExSED(MJD, wavelength, mag, magErr, dmVarOff, randNoise, filename):
np.savetxt(filename, (MJD, wavelength, mag, magErr, dmVarOff, randNoise))
return
def simSPHERExSpec(Dast, rAU, SEDfile, dataDIR, BusTaxi, LC, obsFile, ABrange='', outfilerootname=''):
## set defaults
if (ABrange==''):
ABrange = [15.0, 20.0]
if (outfilerootname==''):
outfilerootname = './simSPHERExSpecDefault'
ABmin, ABmax = ABrange
destfiles = []
## SPHEREx standard wavelengths and expected m5 depths for static sources
wavSPH, m5static = getSPHERExSensitivity(dataDIR)
## noise-free SED computed by ATM and corrected for the Bus emissivity
magTrue = getATMBusSED(SEDfile, wavSPH, BusTaxi[0], Dast, rAU, dataDIR)
## generate light-curve offsets and noise, and produce "raw" SPHEREx spectrum
# light curve parameters
mjd0 = LC['mjd0'] # arbitrary mjd for phase=0
Amp = LC['Ampl'] # sysErr = LC[3]
Per = LC['Period'] # period in days
sysErr = LC['sysErr'] # additional systematic photometric error for SPHEREx moving objects
# and now add photometric noise and variability offsets
wavObs, mjdObs, magRaw, magErr, dmOff, dmN = getObsSED(wavSPH, magTrue, mjd0, Amp, Per, sysErr, dataDIR, obsFile)
## now analyze and plot all seasons separately
# first get seasonal and orbital information
NoPairs, MJDmin, MJDmax = getOrbitInfoFromBrendansMJDs(mjdObs)
Nobs = getSPHERExSeasons(NoPairs, MJDmin, MJDmax, verbose=0)
for season in range(0, len(MJDmin)):
print('Season', season)
wS, mjdS, mRawS, mErrS, dmOffS, dmNoiseS = selectSeasonSED(season, wavObs, mjdObs, magRaw, magErr, dmOff, dmN)
pt.plotSEDerrors(wS, mRawS, mErrS, ABmin, ABmax, True, wavSPH, magTrue)
## save simulated raw SPHEREx SEDs
# first data
outfile = outfilerootname + '_rawSED_Season' + str(season) + '.dat'
dumpSPHERExSED(mjdS, wS, mRawS, mErrS, dmOffS, dmNoiseS, outfile)
# and then plots
destfile = outfilerootname + '_rawSED_Season' + str(season) + '.png'
cmdstr = 'mv oneSEDerrors.png ' + destfile
destfiles.append(destfile)
os.system(cmdstr)
print('produced plots (shown above):')
for i in range(0,len(destfiles)):
print(' ', destfiles[i])
print(' and corresponding data files that have extension dat instead of png')
print(' ')
print(' Each data file lists, for a single SPHEREx season, the following quantities:')
print(' MJD wavelength magSPHEREx magUncertainty varOffset randomNoise')
print(' the last two entries are added for convenience:')
print(' the input noiseless model can be obtained as magTrue = mag - varOffset - randomNoise')
return
| getClosest | identifier_name |
timer.rs | //! The nRF51822 timer system operates off of the high frequency clock
//! (HFCLK) and provides three timers from the clock. Timer0 is tied
//! to the radio through some hard-coded peripheral linkages (e.g., there
//! are dedicated PPI connections between Timer0's compare events and
//! radio tasks, its capture tasks and radio events).
//!
//! This implementation provides a full-fledged Timer interface to
//! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock
//! timer system. It may be that the Tock timer system should be ultimately
//! placed on top of the RTC (from the low frequency clock). It's currently
//! implemented this way as a demonstration that it can be and because
//! the full RTC/clock interface hasn't been finalized yet.
//!
//! This approach should be rewritten, such that the timer system uses
//! the RTC from the low frequency clock (lower power) and the scheduler
//! uses the high frequency clock.
//!
//! Author: Philip Levis <[email protected]>
//! Date: August 18, 2016
use chip;
use core::cell::Cell;
use core::mem;
use kernel::common::VolatileCell;
use kernel::hil;
use nvic;
use peripheral_interrupts::NvicIdx;
#[repr(C, packed)]
struct Registers {
pub task_start: VolatileCell<u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
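    // Each TIMER peripheral occupies a SIZE-byte register block, indexed off TIMER_BASE.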
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
}
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn | (&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 {
self.timer().shorts.get()
}
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
        self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
        self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
        self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
                    self.disable_interrupts(1 << i);
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, client: &'static hil::time::Client) {
self.client.set(Some(client));
}
pub fn start(&self) {
// Make timer 32 bits wide
self.timer().bitmode.set(3);
// Clock is 16MHz, so scale down by 2^10 to 16KHz
self.timer().prescaler.set(10);
self.timer().task_start.set(1);
}
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
#[inline(never)]
pub fn handle_interrupt(&self) {
self.clear_alarm();
self.client.get().map(|client| { client.fired(); });
}
// Enable and disable interrupts use the bottom 4 bits
// for the 4 compare interrupts. These functions shift
// those bits to the correct place in the register.
pub fn enable_interrupts(&self) {
self.timer().intenset.set(ALARM_INTERRUPT_BIT);
}
pub fn disable_interrupts(&self) {
self.timer().intenclr.set(ALARM_INTERRUPT_BIT);
}
pub fn interrupts_enabled(&self) -> bool {
self.timer().intenset.get() == (ALARM_INTERRUPT_BIT)
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn value(&self) -> u32 {
self.timer().task_capture[ALARM_CAPTURE].set(1);
self.timer().cc[ALARM_CAPTURE].get()
}
}
impl hil::time::Time for TimerAlarm {
fn disable(&self) {
self.disable_interrupts();
}
fn is_armed(&self) -> bool {
self.interrupts_enabled()
}
}
impl hil::time::Alarm for TimerAlarm {
type Frequency = hil::time::Freq16KHz;
fn now(&self) -> u32 {
self.value()
}
fn set_alarm(&self, tics: u32) {
self.disable_interrupts();
self.timer().cc[ALARM_COMPARE].set(tics);
self.clear_alarm();
self.enable_interrupts();
}
fn get_alarm(&self) -> u32 {
self.timer().cc[ALARM_COMPARE].get()
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER0_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER0);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER1_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER1);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER2_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER2);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2);
}
| capture_to | identifier_name |
timer.rs | //! The nRF51822 timer system operates off of the high frequency clock
//! (HFCLK) and provides three timers from the clock. Timer0 is tied
//! to the radio through some hard-coded peripheral linkages (e.g., there
//! are dedicated PPI connections between Timer0's compare events and
//! radio tasks, its capture tasks and radio events).
//!
//! This implementation provides a full-fledged Timer interface to
//! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock
//! timer system. It may be that the Tock timer system should be ultimately
//! placed on top of the RTC (from the low frequency clock). It's currently
//! implemented this way as a demonstration that it can be and because
//! the full RTC/clock interface hasn't been finalized yet.
//!
//! This approach should be rewritten, such that the timer system uses
//! the RTC from the low frequency clock (lower power) and the scheduler
//! uses the high frequency clock.
//!
//! Author: Philip Levis <[email protected]>
//! Date: August 18, 2016
use chip;
use core::cell::Cell;
use core::mem;
use kernel::common::VolatileCell;
use kernel::hil;
use nvic;
use peripheral_interrupts::NvicIdx;
#[repr(C, packed)]
struct Registers {
pub task_start: VolatileCell<u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
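    // Each TIMER peripheral occupies a SIZE-byte register block, indexed off TIMER_BASE.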
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => |
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn capture_to(&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 {
self.timer().shorts.get()
}
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
        self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
        self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
        self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
                    self.disable_interrupts(1 << i);
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, client: &'static hil::time::Client) {
self.client.set(Some(client));
}
pub fn start(&self) {
// Make timer 32 bits wide
self.timer().bitmode.set(3);
// Clock is 16MHz, so scale down by 2^10 to 16KHz
self.timer().prescaler.set(10);
self.timer().task_start.set(1);
}
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
#[inline(never)]
pub fn handle_interrupt(&self) {
self.clear_alarm();
self.client.get().map(|client| { client.fired(); });
}
// Enable and disable interrupts use the bottom 4 bits
// for the 4 compare interrupts. These functions shift
// those bits to the correct place in the register.
pub fn enable_interrupts(&self) {
self.timer().intenset.set(ALARM_INTERRUPT_BIT);
}
pub fn disable_interrupts(&self) {
self.timer().intenclr.set(ALARM_INTERRUPT_BIT);
}
pub fn interrupts_enabled(&self) -> bool {
self.timer().intenset.get() == (ALARM_INTERRUPT_BIT)
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn value(&self) -> u32 {
self.timer().task_capture[ALARM_CAPTURE].set(1);
self.timer().cc[ALARM_CAPTURE].get()
}
}
impl hil::time::Time for TimerAlarm {
fn disable(&self) {
self.disable_interrupts();
}
fn is_armed(&self) -> bool {
self.interrupts_enabled()
}
}
impl hil::time::Alarm for TimerAlarm {
type Frequency = hil::time::Freq16KHz;
fn now(&self) -> u32 {
self.value()
}
fn set_alarm(&self, tics: u32) {
self.disable_interrupts();
self.timer().cc[ALARM_COMPARE].set(tics);
self.clear_alarm();
self.enable_interrupts();
}
fn get_alarm(&self) -> u32 {
self.timer().cc[ALARM_COMPARE].get()
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER0_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER0);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER1_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER1);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER2_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER2);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2);
}
| {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
} | conditional_block |
timer.rs | //! The nRF51822 timer system operates off of the high frequency clock
//! (HFCLK) and provides three timers from the clock. Timer0 is tied
//! to the radio through some hard-coded peripheral linkages (e.g., there
//! are dedicated PPI connections between Timer0's compare events and
//! radio tasks, its capture tasks and radio events).
//!
//! This implementation provides a full-fledged Timer interface to
//! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock
//! timer system. It may be that the Tock timer system should be ultimately
//! placed on top of the RTC (from the low frequency clock). It's currently
//! implemented this way as a demonstration that it can be and because
//! the full RTC/clock interface hasn't been finalized yet.
//!
//! This approach should be rewritten, such that the timer system uses
//! the RTC from the low frequency clock (lower power) and the scheduler
//! uses the high frequency clock.
//!
//! Author: Philip Levis <[email protected]>
//! Date: August 18, 2016
use chip;
use core::cell::Cell;
use core::mem;
use kernel::common::VolatileCell;
use kernel::hil;
use nvic;
use peripheral_interrupts::NvicIdx;
#[repr(C, packed)]
struct Registers {
pub task_start: VolatileCell<u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
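    // Each TIMER peripheral occupies a SIZE-byte register block, indexed off TIMER_BASE.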
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
}
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn capture_to(&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 |
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
        self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
        self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
        self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
                    self.disable_interrupts(1 << i);
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, client: &'static hil::time::Client) {
self.client.set(Some(client));
}
pub fn start(&self) {
// Make timer 32 bits wide
self.timer().bitmode.set(3);
// Clock is 16MHz, so scale down by 2^10 to 16KHz
self.timer().prescaler.set(10);
self.timer().task_start.set(1);
}
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
#[inline(never)]
pub fn handle_interrupt(&self) {
self.clear_alarm();
self.client.get().map(|client| { client.fired(); });
}
// Enable and disable interrupts use the bottom 4 bits
// for the 4 compare interrupts. These functions shift
// those bits to the correct place in the register.
pub fn enable_interrupts(&self) {
self.timer().intenset.set(ALARM_INTERRUPT_BIT);
}
pub fn disable_interrupts(&self) {
self.timer().intenclr.set(ALARM_INTERRUPT_BIT);
}
pub fn interrupts_enabled(&self) -> bool {
self.timer().intenset.get() == (ALARM_INTERRUPT_BIT)
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn value(&self) -> u32 {
self.timer().task_capture[ALARM_CAPTURE].set(1);
self.timer().cc[ALARM_CAPTURE].get()
}
}
impl hil::time::Time for TimerAlarm {
fn disable(&self) {
self.disable_interrupts();
}
fn is_armed(&self) -> bool {
self.interrupts_enabled()
}
}
impl hil::time::Alarm for TimerAlarm {
type Frequency = hil::time::Freq16KHz;
fn now(&self) -> u32 {
self.value()
}
fn set_alarm(&self, tics: u32) {
self.disable_interrupts();
self.timer().cc[ALARM_COMPARE].set(tics);
self.clear_alarm();
self.enable_interrupts();
}
fn get_alarm(&self) -> u32 {
self.timer().cc[ALARM_COMPARE].get()
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER0_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER0);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER1_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER1);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER2_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER2);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2);
}
| {
self.timer().shorts.get()
} | identifier_body |
timer.rs | //! The nRF51822 timer system operates off of the high frequency clock
//! (HFCLK) and provides three timers from the clock. Timer0 is tied
//! to the radio through some hard-coded peripheral linkages (e.g., there
//! are dedicated PPI connections between Timer0's compare events and
//! radio tasks, its capture tasks and radio events).
//! | //! This implementation provides a full-fledged Timer interface to
//! timers 0 and 2, and exposes Timer1 as an HIL Alarm, for a Tock
//! timer system. It may be that the Tock timer system should be ultimately
//! placed on top of the RTC (from the low frequency clock). It's currently
//! implemented this way as a demonstration that it can be and because
//! the full RTC/clock interface hasn't been finalized yet.
//!
//! This approach should be rewritten, such that the timer system uses
//! the RTC from the low frequency clock (lower power) and the scheduler
//! uses the high frequency clock.
//!
//! Author: Philip Levis <[email protected]>
//! Date: August 18, 2016
use chip;
use core::cell::Cell;
use core::mem;
use kernel::common::VolatileCell;
use kernel::hil;
use nvic;
use peripheral_interrupts::NvicIdx;
#[repr(C, packed)]
struct Registers {
pub task_start: VolatileCell<u32>,
pub task_stop: VolatileCell<u32>,
pub task_count: VolatileCell<u32>,
pub task_clear: VolatileCell<u32>,
pub task_shutdown: VolatileCell<u32>,
_reserved0: [VolatileCell<u32>; 11],
pub task_capture: [VolatileCell<u32>; 4], // 0x40
_reserved1: [VolatileCell<u32>; 60], // 0x140
pub event_compare: [VolatileCell<u32>; 4],
_reserved2: [VolatileCell<u32>; 44], // 0x150
pub shorts: VolatileCell<u32>, // 0x200
_reserved3: [VolatileCell<u32>; 64], // 0x204
pub intenset: VolatileCell<u32>, // 0x304
pub intenclr: VolatileCell<u32>, // 0x308
_reserved4: [VolatileCell<u32>; 126], // 0x30C
pub mode: VolatileCell<u32>, // 0x504
pub bitmode: VolatileCell<u32>, // 0x508
_reserved5: VolatileCell<u32>,
pub prescaler: VolatileCell<u32>, // 0x510
_reserved6: [VolatileCell<u32>; 11], // 0x514
pub cc: [VolatileCell<u32>; 4], // 0x540
}
const SIZE: usize = 0x1000;
const TIMER_BASE: usize = 0x40008000;
#[derive(Copy,Clone)]
pub enum Location {
TIMER0,
TIMER1,
TIMER2,
}
pub static mut TIMER0: Timer = Timer {
which: Location::TIMER0,
nvic: NvicIdx::TIMER0,
client: Cell::new(None),
};
pub static mut ALARM1: TimerAlarm = TimerAlarm {
which: Location::TIMER1,
nvic: NvicIdx::TIMER1,
client: Cell::new(None),
};
pub static mut TIMER2: Timer = Timer {
which: Location::TIMER2,
nvic: NvicIdx::TIMER2,
client: Cell::new(None),
};
#[allow(non_snake_case)]
fn TIMER(location: Location) -> &'static Registers {
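    // Each TIMER peripheral occupies a SIZE-byte register block, indexed off TIMER_BASE.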
let ptr = TIMER_BASE + (location as usize) * SIZE;
unsafe { mem::transmute(ptr) }
}
pub trait CompareClient {
/// Passes a bitmask of which of the 4 compares/captures fired (0x0-0xf).
fn compare(&self, bitmask: u8);
}
pub struct Timer {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static CompareClient>>,
}
impl Timer {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> Timer {
Timer {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn set_client(&self, client: &'static CompareClient) {
self.client.set(Some(client));
}
pub fn start(&self) {
self.timer().task_start.set(1);
}
// Stops the timer and keeps the value
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
// Stops the timer and clears the value
pub fn shutdown(&self) {
self.timer().task_shutdown.set(1);
}
// Clear the value
pub fn clear(&self) {
self.timer().task_clear.set(1);
}
/// Capture the current timer value into the CC register
/// specified by which, and return the value.
pub fn capture(&self, which: u8) -> u32 {
match which {
0 => {
self.timer().task_capture[0].set(1);
self.timer().cc[0].get()
}
1 => {
self.timer().task_capture[1].set(1);
self.timer().cc[1].get()
}
2 => {
self.timer().task_capture[2].set(1);
self.timer().cc[2].get()
}
_ => {
self.timer().task_capture[3].set(1);
self.timer().cc[3].get()
}
}
}
/// Capture the current value to the CC register specified by
/// which and do not return the value.
pub fn capture_to(&self, which: u8) {
let _ = self.capture(which);
}
/// Shortcuts can automatically stop or clear the timer on a particular
/// compare event; refer to section 18.3 of the nRF reference manual
/// for details. Implementation currently provides shortcuts as the
/// raw bitmask.
pub fn get_shortcuts(&self) -> u32 {
self.timer().shorts.get()
}
pub fn set_shortcuts(&self, shortcut: u32) {
self.timer().shorts.set(shortcut);
}
pub fn get_cc0(&self) -> u32 {
self.timer().cc[0].get()
}
pub fn set_cc0(&self, val: u32) {
self.timer().cc[0].set(val);
}
pub fn get_cc1(&self) -> u32 {
self.timer().cc[1].get()
}
pub fn set_cc1(&self, val: u32) {
        self.timer().cc[1].set(val);
}
pub fn get_cc2(&self) -> u32 {
self.timer().cc[2].get()
}
pub fn set_cc2(&self, val: u32) {
        self.timer().cc[2].set(val);
}
pub fn get_cc3(&self) -> u32 {
self.timer().cc[3].get()
}
pub fn set_cc3(&self, val: u32) {
        self.timer().cc[3].set(val);
}
pub fn enable_interrupts(&self, interrupts: u32) {
self.timer().intenset.set(interrupts << 16);
}
pub fn disable_interrupts(&self, interrupts: u32) {
self.timer().intenclr.set(interrupts << 16);
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn set_prescaler(&self, val: u8) {
// Only bottom 4 bits are valid, so mask them
// nRF51822 reference manual, page 102
self.timer().prescaler.set((val & 0xf) as u32);
}
pub fn get_prescaler(&self) -> u8 {
self.timer().prescaler.get() as u8
}
/// When an interrupt occurs, check if any of the 4 compares have
/// created an event, and if so, add it to the bitmask of triggered
/// events that is passed to the client.
pub fn handle_interrupt(&self) {
nvic::clear_pending(self.nvic);
self.client.get().map(|client| {
let mut val = 0;
// For each of 4 possible compare events, if it's happened,
// clear it and store its bit in val to pass in callback.
for i in 0..4 {
if self.timer().event_compare[i].get() != 0 {
val = val | 1 << i;
self.timer().event_compare[i].set(0);
                    self.disable_interrupts(1 << i);
}
}
client.compare(val as u8);
});
}
}
pub struct TimerAlarm {
which: Location,
nvic: NvicIdx,
client: Cell<Option<&'static hil::time::Client>>,
}
// CC0 is used for capture
// CC1 is used for compare/interrupts
const ALARM_CAPTURE: usize = 0;
const ALARM_COMPARE: usize = 1;
const ALARM_INTERRUPT_BIT: u32 = 1 << (16 + ALARM_COMPARE);
impl TimerAlarm {
fn timer(&self) -> &'static Registers {
TIMER(self.which)
}
pub const fn new(location: Location, nvic: NvicIdx) -> TimerAlarm {
TimerAlarm {
which: location,
nvic: nvic,
client: Cell::new(None),
}
}
pub fn clear(&self) {
self.clear_alarm();
self.timer().task_clear.set(1);
}
pub fn clear_alarm(&self) {
self.timer().event_compare[ALARM_COMPARE].set(0);
self.disable_interrupts();
nvic::clear_pending(self.nvic);
}
pub fn set_client(&self, client: &'static hil::time::Client) {
self.client.set(Some(client));
}
pub fn start(&self) {
// Make timer 32 bits wide
self.timer().bitmode.set(3);
// Clock is 16MHz, so scale down by 2^10 to 16KHz
self.timer().prescaler.set(10);
self.timer().task_start.set(1);
}
pub fn stop(&self) {
self.timer().task_stop.set(1);
}
#[inline(never)]
pub fn handle_interrupt(&self) {
self.clear_alarm();
self.client.get().map(|client| { client.fired(); });
}
// Enable and disable interrupts use the bottom 4 bits
// for the 4 compare interrupts. These functions shift
// those bits to the correct place in the register.
pub fn enable_interrupts(&self) {
self.timer().intenset.set(ALARM_INTERRUPT_BIT);
}
pub fn disable_interrupts(&self) {
self.timer().intenclr.set(ALARM_INTERRUPT_BIT);
}
pub fn interrupts_enabled(&self) -> bool {
self.timer().intenset.get() == (ALARM_INTERRUPT_BIT)
}
pub fn enable_nvic(&self) {
nvic::enable(self.nvic);
}
pub fn disable_nvic(&self) {
nvic::disable(self.nvic);
}
pub fn value(&self) -> u32 {
self.timer().task_capture[ALARM_CAPTURE].set(1);
self.timer().cc[ALARM_CAPTURE].get()
}
}
impl hil::time::Time for TimerAlarm {
fn disable(&self) {
self.disable_interrupts();
}
fn is_armed(&self) -> bool {
self.interrupts_enabled()
}
}
impl hil::time::Alarm for TimerAlarm {
type Frequency = hil::time::Freq16KHz;
fn now(&self) -> u32 {
self.value()
}
fn set_alarm(&self, tics: u32) {
self.disable_interrupts();
self.timer().cc[ALARM_COMPARE].set(tics);
self.clear_alarm();
self.enable_interrupts();
}
fn get_alarm(&self) -> u32 {
self.timer().cc[ALARM_COMPARE].get()
}
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER0_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER0);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER0);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER1_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER1);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER1);
}
#[no_mangle]
#[allow(non_snake_case)]
pub unsafe extern "C" fn TIMER2_Handler() {
use kernel::common::Queue;
nvic::disable(NvicIdx::TIMER2);
chip::INTERRUPT_QUEUE.as_mut().unwrap().enqueue(NvicIdx::TIMER2);
} | random_line_split |
|
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
| /// A breakpoint was requested using an instruction address, and usually a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, and usually a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' attach (just get references to existing data structures) to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection and, if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
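///
/// Illustrative call site (names are made up; the actual driver is the debugger's main loop):
/// ```ignore
/// let (core_statuses, delay_suggested) = session_data.poll_cores(&session_config, &mut debug_adapter)?;
/// ```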
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone" , we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
} | /// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType { | random_line_split |
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
/// A breakpoint was requested using an instruction address, usually as a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, usually as a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], inasmuch as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 |
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' attach (just get references to existing data structures) to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection and, if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone" , we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
}
| {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
} | conditional_block |
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
/// A breakpoint was requested using an instruction address, usually as a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, usually as a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], inasmuch as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' attach (just get references to existing data structures) to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection and, if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone" , we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> | {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
} | identifier_body |
|
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
/// A breakpoint was requested using an instruction address, usually as a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, usually as a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct | {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], inasmuch as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core, that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' attach (just get references to existing data structures) to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection and, if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone" , we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
}
| ActiveBreakpoint | identifier_name |
render.rs | //! HTML generation
//!
use crate::{Result, TomlMap};
use chrono::DateTime;
use handlebars::Handlebars;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use toml::value::Value as TomlValue;
/// Html to insert before and after diff chunks
pub struct DiffStyle {
/// Html to insert before a span of inserted content
/// `<span class="...">`
pub ins_start: String,
/// Html to insert after a span of inserted content
/// `</span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey')
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn default() -> Self {
// unwrap ok because only error condition occurs with templates, and default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); // html escaping is the default and causes issues
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashmap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
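///
/// A minimal usage sketch (illustrative; the template name `"page"` and the `title` variable are made-up examples):
/// ```ignore
/// let mut r = Renderer::default();
/// r.add_template(("page", "<h1>{{title}}</h1>")).expect("register template");
/// let mut data = TomlMap::new();
/// data.insert("title".into(), "Hello".into());
/// let mut out: Vec<u8> = Vec::new();
/// r.render("page", data, &mut out).expect("render");
/// // `out` now holds the rendered HTML (a trailing newline may be appended).
/// ```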
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helper functions
/// 'join-csv' turns an array of values into a comma-separated list
/// 'format-date' rewrites an ISO8601-formatted date into another format
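///
/// Illustrative template usage (the `tags` and `date` variables are made-up examples; `date` must be an RFC3339/ISO8601 string):
/// `{{join-csv tags}}` and `{{format-date date "%Y-%m-%d"}}`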
fn add_base_helpers(hb: &mut Handlebars) |
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">
/// add_style, e.g., "bg-green-100 text-gray-500"
///
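/// A minimal sketch of a call (illustrative only):
/// ```ignore
/// let style = DiffStyle::default();
/// let html = generate_diff("old text", "new text", &style).expect("diff");
/// // `html` contains the combined text with <span> wrappers marking insertions and deletions.
/// ```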
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
/// Test template processor
#[test]
fn test_html_page() {
use crate::render::Renderer;
const TEST_TEMPLATE: &str = "<html><body><h1>{{title}}</h1>{{content}}</body></html>";
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
// simulate processing
let expected = TEST_TEMPLATE
.replace("{{content}}", "<p>hello</p>")
.replace("{{title}}", "Abc");
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
let mut gen = Renderer::default();
gen.add_template(("test_template", TEST_TEMPLATE))
.expect("add test template");
let mut buf: Vec<u8> = Vec::new();
let result = gen.write_page_html(map, "hello", "test_template", &mut buf);
assert!(result.is_ok());
// had to remove newlines - there's an added \n after
let output = String::from_utf8_lossy(&buf).replace("\n", "");
assert_eq!(expected, output);
}
| {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
} | identifier_body |
render.rs | //! HTML generation
//!
use crate::{Result, TomlMap};
use chrono::DateTime;
use handlebars::Handlebars;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use toml::value::Value as TomlValue;
/// Html to insert before and after diff chunks
pub struct DiffStyle {
/// Html to insert before a span of inserted content
/// `<span class="...">`
pub ins_start: String,
/// Html to insert after a span of inserted content
/// `</span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey')
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn default() -> Self {
// unwrap ok because only error condition occurs with templates, and default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); // html escaping is the default and causes issues
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helper functions
/// 'join-csv' turns an array of values into a comma-separated list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
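// Illustrative sketch (not part of the original file): one way the "join-csv"
// and "format-date" helpers registered above could be exercised through the
// public Renderer API. The template and variable names here are made up.
#[test]
fn base_helpers_example() {
    let mut gen = Renderer::default();
    gen.add_template((
        "helpers_demo",
        "{{join-csv tags}} / {{format-date when \"%Y-%m-%d\"}}",
    ))
    .expect("add demo template");
    let mut map = TomlMap::new();
    map.insert(
        "tags".into(),
        toml::Value::Array(vec!["a".into(), "b".into(), "c".into()]),
    );
    map.insert(
        "when".into(),
        toml::Value::from("2021-03-01T12:00:00+00:00"),
    );
    let mut buf: Vec<u8> = Vec::new();
    gen.render("helpers_demo", map, &mut buf).expect("render");
    let out = String::from_utf8_lossy(&buf);
    assert!(out.contains("a,b,c"));
    assert!(out.contains("2021-03-01"));
}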
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">...</span>
/// add_style, e.g., "bg-green-100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
| /// Test template processor
#[test]
fn test_html_page() {
use crate::render::Renderer;
const TEST_TEMPLATE: &str = "<html><body><h1>{{title}}</h1>{{content}}</body></html>";
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
// simulate processing
let expected = TEST_TEMPLATE
.replace("{{content}}", "<p>hello</p>")
.replace("{{title}}", "Abc");
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
let mut gen = Renderer::default();
gen.add_template(("test_template", TEST_TEMPLATE))
.expect("add test template");
let mut buf: Vec<u8> = Vec::new();
let result = gen.write_page_html(map, "hello", "test_template", &mut buf);
assert!(result.is_ok());
// had to remove newlines - there's an added \n after
let output = String::from_utf8_lossy(&buf).replace("\n", "");
assert_eq!(expected, output);
} | let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
| random_line_split |
render.rs | //! HTML generation
//!
use crate::{Result, TomlMap};
use chrono::DateTime;
use handlebars::Handlebars;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use toml::value::Value as TomlValue;
/// Html to insert before and after diff chunks
pub struct DiffStyle {
/// Html to insert before a span of inserted content
/// `<span class="...">`
pub ins_start: String,
/// Html to insert after a span of inserted content
/// `</span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey')
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn | () -> Self {
// unwrap ok because the only error condition occurs with templates, and default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); // html escaping is the default and causes issues
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashmap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
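// Illustrative sketch (not part of the original file): initializing a Renderer
// with templates preloaded via RenderConfig instead of add_template(). The
// template and variable names are made up for the example.
#[test]
fn init_with_config_example() {
    let cfg = RenderConfig {
        templates: vec![("greet", "Hello, {{name}}!")],
        strict_mode: false,
    };
    let gen = Renderer::init(&cfg).expect("init with config");
    let mut map = TomlMap::new();
    map.insert("name".into(), toml::Value::from("world"));
    let mut buf: Vec<u8> = Vec::new();
    gen.render("greet", map, &mut buf).expect("render");
    assert_eq!(String::from_utf8_lossy(&buf), "Hello, world!");
}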
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns array of values into comma-separated list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">...</span>
/// add_style, e.g., "bg-green-100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
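// Illustrative sketch (not part of the original file): generate_diff wraps the
// changed chunks with the default DiffStyle spans.
#[test]
fn generate_diff_example() {
    let style = DiffStyle::default();
    let html = generate_diff("same old text", "same new text", &style).expect("diff ok");
    // the deleted word is wrapped in del_start/del_end, the inserted one in ins_start/ins_end
    assert!(html.contains("line-through"));
    assert!(html.contains("bg-green-100"));
}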
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
/// Test template processor
#[test]
fn test_html_page() {
use crate::render::Renderer;
const TEST_TEMPLATE: &str = "<html><body><h1>{{title}}</h1>{{content}}</body></html>";
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
// simulate processing
let expected = TEST_TEMPLATE
.replace("{{content}}", "<p>hello</p>")
.replace("{{title}}", "Abc");
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
let mut gen = Renderer::default();
gen.add_template(("test_template", TEST_TEMPLATE))
.expect("add test template");
let mut buf: Vec<u8> = Vec::new();
let result = gen.write_page_html(map, "hello", "test_template", &mut buf);
assert!(result.is_ok());
// had to remove newlines - there's an added \n after
let output = String::from_utf8_lossy(&buf).replace("\n", "");
assert_eq!(expected, output);
}
| default | identifier_name |
quantity.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"bytes"
"errors"
"fmt"
"math"
"math/big"
"strconv"
"strings"
inf "gopkg.in/inf.v0"
)
// Quantity is a fixed-point representation of a number.
// It provides convenient marshaling/unmarshaling in JSON and YAML,
// in addition to String() and AsInt64() accessors.
//
// The serialization format is:
//
// ```
// <quantity> ::= <signedNumber><suffix>
//
// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
//
// <digit> ::= 0 | 1 | ... | 9
// <digits> ::= <digit> | <digit><digits>
// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
// <sign> ::= "+" | "-"
// <signedNumber> ::= <number> | <sign><number>
// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
//
// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
//
// <decimalSI> ::= m | "" | k | M | G | T | P | E
//
// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
//
// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
// ```
//
// No matter which of the three exponent forms is used, no quantity may represent
// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
// places. Numbers larger or more precise will be capped or rounded up.
// (E.g.: 0.1m will be rounded up to 1m.)
// This may be extended in the future if we require larger or smaller quantities.
//
// When a Quantity is parsed from a string, it will remember the type of suffix
// it had, and will use the same type again when it is serialized.
//
// Before serializing, Quantity will be put in "canonical form".
// This means that Exponent/suffix will be adjusted up or down (with a
// corresponding increase or decrease in Mantissa) such that:
//
// - No precision is lost
// - No fractional digits will be emitted
// - The exponent (or suffix) is as large as possible.
//
// The sign will be omitted unless the number is negative.
//
// Examples:
//
// - 1.5 will be serialized as "1500m"
// - 1.5Gi will be serialized as "1536Mi"
//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
//
// Non-canonical values will still parse as long as they are well formed,
// but will be re-emitted in their canonical form. (So always use canonical
// form, or don't diff.)
//
// This format is intended to make it difficult to use these numbers without
// writing some sort of special handling code in the hopes that that will
// cause implementors to also use a fixed point implementation.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
// d is the quantity in inf.Dec form if d.Dec != nil
d infDecAmount
// s is the generated value of this quantity to avoid recalculation
s string
// Change Format at will. See the comment for Canonicalize for
// more details.
Format
}
// CanonicalValue allows a quantity amount to be converted to a string.
type CanonicalValue interface {
// AsCanonicalBytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-10. Callers may
// pass a byte slice to the method to avoid allocations.
AsCanonicalBytes(out []byte) ([]byte, int32)
// AsCanonicalBase1024Bytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-1024. Callers
// may pass a byte slice to the method to avoid allocations.
AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
}
// Format lists the three possible formattings of a quantity.
type Format string
const (
DecimalExponent = Format("DecimalExponent") // e.g., 12e6
BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20)
DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6)
)
// MustParse turns the given string into a quantity or panics; for tests
// or other cases where you know the string is valid.
func MustParse(str string) Quantity {
q, err := ParseQuantity(str)
if err != nil {
panic(fmt.Errorf("cannot parse '%v': %v", str, err))
}
return q
}
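// quantityCanonicalFormExample is an illustrative sketch (not part of the
// original file) of the canonical-form behaviour described in the Quantity
// doc comment above.
func quantityCanonicalFormExample() {
	q := MustParse("1.5Gi")
	_ = q.String() // "1536Mi": BinarySI values are re-emitted with the largest exact suffix
	_ = q.Value()  // 1610612736: the unscaled integer value (1.5 * 2^30), rounded up
	d := MustParse("1.5")
	_ = d.String() // "1500m": decimal fractions are expressed with the milli suffix
}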
const (
// splitREString is used to separate a number from its suffix; as such,
// this is overly permissive, but that's OK-- it will be checked later.
splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)
var (
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
)
// parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str)
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
break Suffix
}
}
// we encountered a non decimal in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
return
}
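// parseQuantityStringExample is an illustrative sketch (not part of the
// original file) of how the scanner above splits a typical input.
func parseQuantityStringExample() {
	// For "12.5Mi": positive == true, value == "12.5", num == "12",
	// denom == "5", suffix == "Mi", err == nil.
	positive, value, num, denom, suffix, err := parseQuantityString("12.5Mi")
	_, _, _, _, _, _ = positive, value, num, denom, suffix, err
}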
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI, s: str}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
// if the number is in canonical form, reuse the string
switch format {
case BinarySI:
if exponent%10 == 0 && (value&0x07 != 0) {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
default:
if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ Quantity) OpenAPISchemaFormat() string { return "" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} }
// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
}
default:
format = DecimalExponent
}
// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
}
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
return base * math.Pow10(exponent)
}
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
q.s = ""
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
// avoid clearing the string value if we have already calculated it
if q.i.scale >= scale {
return true
}
q.s = ""
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = ""
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
q.s = ""
if q.IsZero() {
q.Format = y.Format
}
if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
return
}
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
if q.d.Dec == nil && y.d.Dec == nil {
return q.i.Cmp(y.i)
}
return q.AsDec().Cmp(y.AsDec())
}
// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
if q.d.Dec != nil {
return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
}
return q.i.Cmp(int64Amount{value: y})
}
// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
q.s = ""
if q.d.Dec == nil {
q.i.value = -q.i.value
return
}
q.d.Dec.Neg(q.d.Dec)
}
// Equal checks equality of two Quantities. This is useful for testing with
// cmp.Equal.
func (q Quantity) Equal(v Quantity) bool {
return q.Cmp(v) == 0
}
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
// String formats the Quantity as a string, caching the result if not calculated.
// String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity.
func (q *Quantity) String() string {
if q == nil {
return "<nil>"
}
if len(q.s) == 0 {
result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result)
number = append(number, suffix...)
q.s = string(number)
}
return q.s
}
// MarshalJSON implements the json.Marshaller interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
if len(q.s) > 0 |
result := make([]byte, int64QuantityExpectedBytes)
result[0] = '"'
number, suffix := q.CanonicalizeBytes(result[1:1])
// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
// the source slice and returning that
if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
number = append(number, suffix...)
number = append(number, '"')
return result[:1+len(number)], nil
}
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
result = append(result, number...)
result = append(result, suffix...)
result = append(result, '"')
return result, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
}
// UnmarshalJSON implements the json.Unmarshaller interface.
// TODO: Remove support for leading/trailing whitespace
func (q *Quantity) UnmarshalJSON(value []byte) error {
l := len(value)
if l == 4 && bytes.Equal(value, []byte("null")) {
q.d.Dec = nil
q.i = int64Amount{}
return nil
}
if l >= 2 && value[0] == '"' && value[l-1] == '"' {
value = value[1 : l-1]
}
parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
if err != nil {
return err
}
// This copy is safe because parsed will not be referred to again.
*q = parsed
return nil
}
// NewDecimalQuantity returns a new Quantity representing the given
// value in the given format.
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
return &Quantity{
d: infDecAmount{&b},
Format: format,
}
}
// NewQuantity returns a new Quantity representing the given
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value},
Format: format,
}
}
// NewMilliQuantity returns a new Quantity representing the given
// value * 1/1000 in the given format. Note that BinarySI formatting
// will round fractional values, and will be changed to DecimalSI for
// values x where (-1 < x < 1) && (x != 0).
func NewMilliQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: -3},
Format: format,
}
}
// NewScaledQuantity returns a new Quantity representing the given
// value * 10^scale in DecimalSI format.
func NewScaledQuantity(value int64, scale Scale) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: scale},
Format: DecimalSI,
}
}
// Value returns the unscaled value of q rounded up to the nearest integer away from 0.
func (q *Quantity) Value() int64 {
return q.ScaledValue(0)
}
// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
// if that's a concern, call Value() first to verify the number is small enough.
func (q *Quantity) MilliValue() int64 {
return q.ScaledValue(Milli)
}
// ScaledValue returns the value of ceil(q / 10^scale).
// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000.
// This could overflow an int64.
// To detect overflow, call Value() first and verify the expected magnitude.
func (q *Quantity) ScaledValue(scale Scale) int64 {
if q.d.Dec == nil {
i, _ := q.i.AsScaledInt64(scale)
return i
}
dec := q.d.Dec
return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
}
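// scaledValueExample is an illustrative sketch (not part of the original file)
// of the rounding behaviour of Value, MilliValue and ScaledValue.
func scaledValueExample() {
	q := MustParse("2500m")  // 2.5
	_ = q.Value()            // 3: ceil(2.5), rounded up away from zero
	_ = q.MilliValue()       // 2500
	_ = q.ScaledValue(Milli) // 2500: same as MilliValue
}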
// Set sets q's value to be value.
func (q *Quantity) Set(value int64) {
q.SetScaled(value, 0)
}
// SetMilli sets q's value to be value * 1/1000.
func (q *Quantity) SetMilli(value int64) {
q.SetScaled(value, Milli)
}
// SetScaled sets q's value to be value * 10^scale
func (q *Quantity) SetScaled(value int64, scale Scale) {
q.s = ""
q.d.Dec = nil
q.i = int64Amount{value: value, scale: scale}
}
// QuantityValue makes it possible to use a Quantity as value for a command
// line parameter.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
type QuantityValue struct {
Quantity
}
// Set implements pflag.Value.Set and Go flag.Value.Set.
func (q *QuantityValue) Set(s string) error {
quantity, err := ParseQuantity(s)
if err != nil {
return err
}
q.Quantity = quantity
return nil
}
// Type implements pflag.Value.Type.
func (q QuantityValue) Type() string {
return "quantity"
}
| {
out := make([]byte, len(q.s)+2)
out[0], out[len(out)-1] = '"', '"'
copy(out[1:], q.s)
return out, nil
} | conditional_block |
quantity.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"bytes"
"errors"
"fmt"
"math"
"math/big"
"strconv"
"strings"
inf "gopkg.in/inf.v0"
)
// Quantity is a fixed-point representation of a number.
// It provides convenient marshaling/unmarshaling in JSON and YAML,
// in addition to String() and AsInt64() accessors.
//
// The serialization format is:
//
// ```
// <quantity> ::= <signedNumber><suffix>
//
// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
//
// <digit> ::= 0 | 1 | ... | 9
// <digits> ::= <digit> | <digit><digits>
// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
// <sign> ::= "+" | "-"
// <signedNumber> ::= <number> | <sign><number>
// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
//
// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
//
// <decimalSI> ::= m | "" | k | M | G | T | P | E
//
// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
//
// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
// ```
//
// No matter which of the three exponent forms is used, no quantity may represent
// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
// places. Numbers larger or more precise will be capped or rounded up.
// (E.g.: 0.1m will be rounded up to 1m.)
// This may be extended in the future if we require larger or smaller quantities.
//
// When a Quantity is parsed from a string, it will remember the type of suffix
// it had, and will use the same type again when it is serialized.
//
// Before serializing, Quantity will be put in "canonical form".
// This means that Exponent/suffix will be adjusted up or down (with a
// corresponding increase or decrease in Mantissa) such that:
//
// - No precision is lost
// - No fractional digits will be emitted
// - The exponent (or suffix) is as large as possible.
//
// The sign will be omitted unless the number is negative.
//
// Examples:
//
// - 1.5 will be serialized as "1500m"
// - 1.5Gi will be serialized as "1536Mi"
//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
//
// Non-canonical values will still parse as long as they are well formed,
// but will be re-emitted in their canonical form. (So always use canonical
// form, or don't diff.)
//
// This format is intended to make it difficult to use these numbers without
// writing some sort of special handling code in the hopes that that will
// cause implementors to also use a fixed point implementation.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
// d is the quantity in inf.Dec form if d.Dec != nil
d infDecAmount
// s is the generated value of this quantity to avoid recalculation
s string
// Change Format at will. See the comment for Canonicalize for
// more details.
Format
}
// CanonicalValue allows a quantity amount to be converted to a string.
type CanonicalValue interface {
// AsCanonicalBytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-10. Callers may
// pass a byte slice to the method to avoid allocations.
AsCanonicalBytes(out []byte) ([]byte, int32)
// AsCanonicalBase1024Bytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-1024. Callers
// may pass a byte slice to the method to avoid allocations.
AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
}
// Format lists the three possible formattings of a quantity.
type Format string
const (
DecimalExponent = Format("DecimalExponent") // e.g., 12e6
BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20)
DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6)
)
// MustParse turns the given string into a quantity or panics; for tests
// or other cases where you know the string is valid.
func MustParse(str string) Quantity {
q, err := ParseQuantity(str)
if err != nil {
panic(fmt.Errorf("cannot parse '%v': %v", str, err))
}
return q
}
const (
// splitREString is used to separate a number from its suffix; as such,
// this is overly permissive, but that's OK-- it will be checked later.
splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)
var (
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
)
// parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str)
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
break Suffix
}
}
// we encountered a non decimal in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
return
}
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI, s: str}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
// if the number is in canonical form, reuse the string
switch format {
case BinarySI:
if exponent%10 == 0 && (value&0x07 != 0) {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
default:
if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ Quantity) OpenAPISchemaFormat() string { return "" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} }
// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
}
default:
format = DecimalExponent
}
// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
}
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
return base * math.Pow10(exponent)
}
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
q.s = ""
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
// avoid clearing the string value if we have already calculated it
if q.i.scale >= scale {
return true
}
q.s = ""
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = ""
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
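// quantityAdditionExample is an illustrative sketch (not part of the original
// file): a zero-valued Quantity adopts the format of the first operand added
// to it, and sums stay in fixed-point form.
func quantityAdditionExample() {
	total := NewQuantity(0, DecimalSI)
	total.Add(MustParse("1Gi")) // total is zero, so it adopts BinarySI
	total.Add(MustParse("512Mi"))
	_ = total.String() // "1536Mi"
}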
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) | (y Quantity) {
q.s = ""
if q.IsZero() {
q.Format = y.Format
}
if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
return
}
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
if q.d.Dec == nil && y.d.Dec == nil {
return q.i.Cmp(y.i)
}
return q.AsDec().Cmp(y.AsDec())
}
// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
if q.d.Dec != nil {
return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
}
return q.i.Cmp(int64Amount{value: y})
}
// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
q.s = ""
if q.d.Dec == nil {
q.i.value = -q.i.value
return
}
q.d.Dec.Neg(q.d.Dec)
}
// Equal checks equality of two Quantities. This is useful for testing with
// cmp.Equal.
func (q Quantity) Equal(v Quantity) bool {
return q.Cmp(v) == 0
}
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
// String formats the Quantity as a string, caching the result if not calculated.
// String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity.
func (q *Quantity) String() string {
if q == nil {
return "<nil>"
}
if len(q.s) == 0 {
result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result)
number = append(number, suffix...)
q.s = string(number)
}
return q.s
}
// MarshalJSON implements the json.Marshaller interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
if len(q.s) > 0 {
out := make([]byte, len(q.s)+2)
out[0], out[len(out)-1] = '"', '"'
copy(out[1:], q.s)
return out, nil
}
result := make([]byte, int64QuantityExpectedBytes)
result[0] = '"'
number, suffix := q.CanonicalizeBytes(result[1:1])
// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
// the source slice and returning that
if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
number = append(number, suffix...)
number = append(number, '"')
return result[:1+len(number)], nil
}
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
result = append(result, number...)
result = append(result, suffix...)
result = append(result, '"')
return result, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
}
// UnmarshalJSON implements the json.Unmarshaller interface.
// TODO: Remove support for leading/trailing whitespace
func (q *Quantity) UnmarshalJSON(value []byte) error {
l := len(value)
if l == 4 && bytes.Equal(value, []byte("null")) {
q.d.Dec = nil
q.i = int64Amount{}
return nil
}
if l >= 2 && value[0] == '"' && value[l-1] == '"' {
value = value[1 : l-1]
}
parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
if err != nil {
return err
}
// This copy is safe because parsed will not be referred to again.
*q = parsed
return nil
}
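// quantityJSONExample is an illustrative sketch (not part of the original
// file) of the JSON round trip: a Quantity marshals to a quoted canonical
// string and parses back from one.
func quantityJSONExample() {
	q := MustParse("128Mi")
	data, _ := q.MarshalJSON() // []byte(`"128Mi"`)
	var out Quantity
	_ = out.UnmarshalJSON(data)
	_ = out.Equal(q) // true
}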
// NewDecimalQuantity returns a new Quantity representing the given
// value in the given format.
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
return &Quantity{
d: infDecAmount{&b},
Format: format,
}
}
// NewQuantity returns a new Quantity representing the given
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value},
Format: format,
}
}
// NewMilliQuantity returns a new Quantity representing the given
// value * 1/1000 in the given format. Note that BinarySI formatting
// will round fractional values, and will be changed to DecimalSI for
// values x where (-1 < x < 1) && (x != 0).
func NewMilliQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: -3},
Format: format,
}
}
// NewScaledQuantity returns a new Quantity representing the given
// value * 10^scale in DecimalSI format.
func NewScaledQuantity(value int64, scale Scale) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: scale},
Format: DecimalSI,
}
}
// Value returns the unscaled value of q rounded up to the nearest integer away from 0.
func (q *Quantity) Value() int64 {
return q.ScaledValue(0)
}
// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
// if that's a concern, call Value() first to verify the number is small enough.
func (q *Quantity) MilliValue() int64 {
return q.ScaledValue(Milli)
}
// ScaledValue returns the value of ceil(q / 10^scale).
// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000.
// This could overflow an int64.
// To detect overflow, call Value() first and verify the expected magnitude.
func (q *Quantity) ScaledValue(scale Scale) int64 {
if q.d.Dec == nil {
i, _ := q.i.AsScaledInt64(scale)
return i
}
dec := q.d.Dec
return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
}
// Set sets q's value to be value.
func (q *Quantity) Set(value int64) {
q.SetScaled(value, 0)
}
// SetMilli sets q's value to be value * 1/1000.
func (q *Quantity) SetMilli(value int64) {
q.SetScaled(value, Milli)
}
// SetScaled sets q's value to be value * 10^scale
func (q *Quantity) SetScaled(value int64, scale Scale) {
q.s = ""
q.d.Dec = nil
q.i = int64Amount{value: value, scale: scale}
}
// QuantityValue makes it possible to use a Quantity as value for a command
// line parameter.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
type QuantityValue struct {
Quantity
}
// Set implements pflag.Value.Set and Go flag.Value.Set.
func (q *QuantityValue) Set(s string) error {
quantity, err := ParseQuantity(s)
if err != nil {
return err
}
q.Quantity = quantity
return nil
}
// Type implements pflag.Value.Type.
func (q QuantityValue) Type() string {
return "quantity"
}
| Sub | identifier_name |
quantity.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"bytes"
"errors"
"fmt"
"math"
"math/big"
"strconv"
"strings"
inf "gopkg.in/inf.v0"
)
// Quantity is a fixed-point representation of a number.
// It provides convenient marshaling/unmarshaling in JSON and YAML,
// in addition to String() and AsInt64() accessors.
//
// The serialization format is:
//
// ```
// <quantity> ::= <signedNumber><suffix>
//
// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
//
// <digit> ::= 0 | 1 | ... | 9
// <digits> ::= <digit> | <digit><digits>
// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
// <sign> ::= "+" | "-"
// <signedNumber> ::= <number> | <sign><number>
// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
//
// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
//
// <decimalSI> ::= m | "" | k | M | G | T | P | E
//
// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
//
// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
// ```
//
// No matter which of the three exponent forms is used, no quantity may represent
// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
// places. Numbers larger or more precise will be capped or rounded up.
// (E.g.: 0.1m will be rounded up to 1m.)
// This may be extended in the future if we require larger or smaller quantities.
//
// When a Quantity is parsed from a string, it will remember the type of suffix
// it had, and will use the same type again when it is serialized.
//
// Before serializing, Quantity will be put in "canonical form".
// This means that Exponent/suffix will be adjusted up or down (with a
// corresponding increase or decrease in Mantissa) such that:
//
// - No precision is lost
// - No fractional digits will be emitted
// - The exponent (or suffix) is as large as possible.
//
// The sign will be omitted unless the number is negative.
//
// Examples:
//
// - 1.5 will be serialized as "1500m"
// - 1.5Gi will be serialized as "1536Mi"
//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
//
// Non-canonical values will still parse as long as they are well formed,
// but will be re-emitted in their canonical form. (So always use canonical
// form, or don't diff.)
//
// This format is intended to make it difficult to use these numbers without
// writing some sort of special handling code in the hopes that that will
// cause implementors to also use a fixed point implementation.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
// d is the quantity in inf.Dec form if d.Dec != nil
d infDecAmount
// s is the generated value of this quantity to avoid recalculation
s string
// Change Format at will. See the comment for Canonicalize for
// more details.
Format
}
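// Illustrative example added during editing; it is not part of the original
// Kubernetes source. A minimal sketch of the canonical-form behavior that the
// comment above documents (the function name is ours, the values follow the
// documented examples).
func exampleCanonicalForm() {
    q := MustParse("1.5")
    fmt.Println(q.String()) // "1500m": no fractional digits, largest possible suffix

    g := MustParse("1.5Gi")
    fmt.Println(g.String()) // "1536Mi": binary suffixes canonicalize the same way
}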
// CanonicalValue allows a quantity amount to be converted to a string.
type CanonicalValue interface {
// AsCanonicalBytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-10. Callers may
// pass a byte slice to the method to avoid allocations.
AsCanonicalBytes(out []byte) ([]byte, int32)
// AsCanonicalBase1024Bytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-1024. Callers
// may pass a byte slice to the method to avoid allocations.
AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
}
// Format lists the three possible formattings of a quantity.
type Format string
const (
DecimalExponent = Format("DecimalExponent") // e.g., 12e6
BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20)
DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6)
)
// MustParse turns the given string into a quantity or panics; for tests
// or other cases where you know the string is valid.
func MustParse(str string) Quantity |
const (
// splitREString is used to separate a number from its suffix; as such,
// this is overly permissive, but that's OK-- it will be checked later.
splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)
var (
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
)
// parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str)
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
break Suffix
}
}
// we encountered a non-decimal character in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
return
}
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI, s: str}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
// if the number is in canonical form, reuse the string
switch format {
case BinarySI:
if exponent%10 == 0 && (value&0x07 != 0) {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
default:
if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
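// Illustrative sketch added during editing (not in the original file): typical
// ParseQuantity usage with error handling; the literals are arbitrary examples.
func exampleParseQuantityUsage() {
    q, err := ParseQuantity("250m")
    if err != nil {
        panic(err)
    }
    fmt.Println(q.MilliValue()) // 250
    fmt.Println(q.Value())      // 1 (rounded up away from zero)

    if _, err := ParseQuantity("not a quantity"); err != nil {
        fmt.Println("rejected:", err) // malformed strings return an error
    }
}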
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ Quantity) OpenAPISchemaFormat() string { return "" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} }
// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
}
default:
format = DecimalExponent
}
// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
}
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
return base * math.Pow10(exponent)
}
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
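// Illustrative sketch added during editing (not in the original file): AsInt64
// succeeds only while the quantity is still on the int64 fast path; once the
// value has been promoted to inf.Dec, callers must use AsDec instead.
func exampleAsInt64FastPath() {
    q := NewQuantity(42, DecimalSI)
    if v, ok := q.AsInt64(); ok {
        fmt.Println(v) // 42
    }

    q.ToDec() // force the inf.Dec representation
    if _, ok := q.AsInt64(); !ok {
        fmt.Println(q.AsDec()) // fall back to the arbitrary-precision form
    }
}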
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
q.s = ""
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
// avoid clearing the string value if we have already calculated it
if q.i.scale >= scale {
return true
}
q.s = ""
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = ""
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
q.s = ""
if q.IsZero() {
q.Format = y.Format
}
if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
return
}
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}
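// Illustrative sketch added during editing (not in the original file): Add and
// Sub mutate the receiver in place, and a zero-valued receiver adopts the
// format of its argument. The literals are arbitrary examples.
func exampleAddSub() {
    var total Quantity // zero value
    total.Add(MustParse("1Gi"))
    total.Add(MustParse("512Mi"))
    fmt.Println(total.String()) // "1536Mi"

    total.Sub(MustParse("512Mi"))
    fmt.Println(total.String()) // "1Gi"
}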
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
if q.d.Dec == nil && y.d.Dec == nil {
return q.i.Cmp(y.i)
}
return q.AsDec().Cmp(y.AsDec())
}
// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
if q.d.Dec != nil {
return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
}
return q.i.Cmp(int64Amount{value: y})
}
// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
q.s = ""
if q.d.Dec == nil {
q.i.value = -q.i.value
return
}
q.d.Dec.Neg(q.d.Dec)
}
// Equal checks equality of two Quantities. This is useful for testing with
// cmp.Equal.
func (q Quantity) Equal(v Quantity) bool {
return q.Cmp(v) == 0
}
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
// String formats the Quantity as a string, caching the result if not calculated.
// String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity.
func (q *Quantity) String() string {
if q == nil {
return "<nil>"
}
if len(q.s) == 0 {
result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result)
number = append(number, suffix...)
q.s = string(number)
}
return q.s
}
// MarshalJSON implements the json.Marshaler interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
if len(q.s) > 0 {
out := make([]byte, len(q.s)+2)
out[0], out[len(out)-1] = '"', '"'
copy(out[1:], q.s)
return out, nil
}
result := make([]byte, int64QuantityExpectedBytes)
result[0] = '"'
number, suffix := q.CanonicalizeBytes(result[1:1])
// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
// the source slice and returning that
if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
number = append(number, suffix...)
number = append(number, '"')
return result[:1+len(number)], nil
}
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
result = append(result, number...)
result = append(result, suffix...)
result = append(result, '"')
return result, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// TODO: Remove support for leading/trailing whitespace
func (q *Quantity) UnmarshalJSON(value []byte) error {
l := len(value)
if l == 4 && bytes.Equal(value, []byte("null")) {
q.d.Dec = nil
q.i = int64Amount{}
return nil
}
if l >= 2 && value[0] == '"' && value[l-1] == '"' {
value = value[1 : l-1]
}
parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
if err != nil {
return err
}
// This copy is safe because parsed will not be referred to again.
*q = parsed
return nil
}
// NewDecimalQuantity returns a new Quantity representing the given
// value in the given format.
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
return &Quantity{
d: infDecAmount{&b},
Format: format,
}
}
// NewQuantity returns a new Quantity representing the given
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value},
Format: format,
}
}
// NewMilliQuantity returns a new Quantity representing the given
// value * 1/1000 in the given format. Note that BinarySI formatting
// will round fractional values, and will be changed to DecimalSI for
// values x where (-1 < x < 1) && (x != 0).
func NewMilliQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: -3},
Format: format,
}
}
// NewScaledQuantity returns a new Quantity representing the given
// value * 10^scale in DecimalSI format.
func NewScaledQuantity(value int64, scale Scale) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: scale},
Format: DecimalSI,
}
}
// Value returns the unscaled value of q rounded up to the nearest integer away from 0.
func (q *Quantity) Value() int64 {
return q.ScaledValue(0)
}
// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
// if that's a concern, call Value() first to verify the number is small enough.
func (q *Quantity) MilliValue() int64 {
return q.ScaledValue(Milli)
}
// ScaledValue returns the value of ceil(q / 10^scale).
// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000.
// This could overflow an int64.
// To detect overflow, call Value() first and verify the expected magnitude.
func (q *Quantity) ScaledValue(scale Scale) int64 {
if q.d.Dec == nil {
i, _ := q.i.AsScaledInt64(scale)
return i
}
dec := q.d.Dec
return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
}
// Set sets q's value to be value.
func (q *Quantity) Set(value int64) {
q.SetScaled(value, 0)
}
// SetMilli sets q's value to be value * 1/1000.
func (q *Quantity) SetMilli(value int64) {
q.SetScaled(value, Milli)
}
// SetScaled sets q's value to be value * 10^scale
func (q *Quantity) SetScaled(value int64, scale Scale) {
q.s = ""
q.d.Dec = nil
q.i = int64Amount{value: value, scale: scale}
}
// QuantityValue makes it possible to use a Quantity as value for a command
// line parameter.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
type QuantityValue struct {
Quantity
}
// Set implements pflag.Value.Set and Go flag.Value.Set.
func (q *QuantityValue) Set(s string) error {
quantity, err := ParseQuantity(s)
if err != nil {
return err
}
q.Quantity = quantity
return nil
}
// Type implements pflag.Value.Type.
func (q QuantityValue) Type() string {
return "quantity"
}
| {
q, err := ParseQuantity(str)
if err != nil {
panic(fmt.Errorf("cannot parse '%v': %v", str, err))
}
return q
} | identifier_body |
quantity.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"bytes"
"errors"
"fmt"
"math"
"math/big"
"strconv"
"strings"
inf "gopkg.in/inf.v0"
)
// Quantity is a fixed-point representation of a number.
// It provides convenient marshaling/unmarshaling in JSON and YAML,
// in addition to String() and AsInt64() accessors.
//
// The serialization format is:
//
// ```
// <quantity> ::= <signedNumber><suffix>
//
// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
//
// <digit> ::= 0 | 1 | ... | 9
// <digits> ::= <digit> | <digit><digits>
// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
// <sign> ::= "+" | "-"
// <signedNumber> ::= <number> | <sign><number>
// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
//
// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
//
// <decimalSI> ::= m | "" | k | M | G | T | P | E
//
// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
//
// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
// ```
//
// No matter which of the three exponent forms is used, no quantity may represent
// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
// places. Numbers larger or more precise will be capped or rounded up.
// (E.g.: 0.1m will be rounded up to 1m.)
// This may be extended in the future if we require larger or smaller quantities.
//
// When a Quantity is parsed from a string, it will remember the type of suffix
// it had, and will use the same type again when it is serialized.
//
// Before serializing, Quantity will be put in "canonical form".
// This means that Exponent/suffix will be adjusted up or down (with a
// corresponding increase or decrease in Mantissa) such that:
//
// - No precision is lost
// - No fractional digits will be emitted
// - The exponent (or suffix) is as large as possible.
//
// The sign will be omitted unless the number is negative.
//
// Examples:
//
// - 1.5 will be serialized as "1500m"
// - 1.5Gi will be serialized as "1536Mi"
//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
//
// Non-canonical values will still parse as long as they are well formed,
// but will be re-emitted in their canonical form. (So always use canonical
// form, or don't diff.)
//
// This format is intended to make it difficult to use these numbers without
// writing some sort of special handling code in the hopes that that will
// cause implementors to also use a fixed point implementation.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
// d is the quantity in inf.Dec form if d.Dec != nil
d infDecAmount
// s is the generated value of this quantity to avoid recalculation
s string
// Change Format at will. See the comment for Canonicalize for
// more details.
Format
}
// CanonicalValue allows a quantity amount to be converted to a string.
type CanonicalValue interface {
// AsCanonicalBytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-10. Callers may
// pass a byte slice to the method to avoid allocations.
AsCanonicalBytes(out []byte) ([]byte, int32)
// AsCanonicalBase1024Bytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-1024. Callers
// may pass a byte slice to the method to avoid allocations.
AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
}
// Format lists the three possible formattings of a quantity.
type Format string
const (
DecimalExponent = Format("DecimalExponent") // e.g., 12e6
BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20)
DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6)
)
// MustParse turns the given string into a quantity or panics; for tests
// or other cases where you know the string is valid.
func MustParse(str string) Quantity {
q, err := ParseQuantity(str)
if err != nil {
panic(fmt.Errorf("cannot parse '%v': %v", str, err))
}
return q
}
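// Illustrative sketch added during editing (not in the original file): MustParse
// is convenient for hard-coded literals (tests, defaults) where a malformed
// string should fail loudly at startup rather than return an error.
func exampleMustParse() {
    defaultMemory := MustParse("64Mi")
    defaultCPU := MustParse("250m")
    fmt.Println(defaultMemory.String(), defaultCPU.String()) // "64Mi 250m"
}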
const (
// splitREString is used to separate a number from its suffix; as such,
// this is overly permissive, but that's OK-- it will be checked later.
splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)
var (
// Errors that could happen while parsing a string.
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
)
|
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
break Suffix
}
}
// we encountered a non-decimal character in the Suffix loop, but the last character
// was not a valid exponent
err = ErrFormatWrong
return
}
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI, s: str}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
// if the number is in canonical form, reuse the string
switch format {
case BinarySI:
if exponent%10 == 0 && (value&0x07 != 0) {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
default:
if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
}
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
if base == 10 {
amount.SetScale(amount.Scale() + Scale(exponent).infScale())
} else if base == 2 {
// numericSuffix = 2 ** exponent
numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
ub := amount.UnscaledBig()
amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
}
// Cap at min/max bounds.
sign := amount.Sign()
if sign == -1 {
amount.Neg(amount)
}
// This rounds non-zero values up to the minimum representable value, under the theory that
// if you want some resources, you should get some resources, even if you asked for way too small
// of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
// the side effect of rounding values < .5n to zero.
if v, ok := amount.Unscaled(); v != int64(0) || !ok {
amount.Round(amount, Nano.infScale(), inf.RoundUp)
}
// The max is just a simple cap.
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
}
if sign == -1 {
amount.Neg(amount)
}
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
// DeepCopy returns a deep-copy of the Quantity value. Note that the method
// receiver is a value, so we can mutate it in-place and return it.
func (q Quantity) DeepCopy() Quantity {
if q.d.Dec != nil {
tmp := &inf.Dec{}
q.d.Dec = tmp.Set(q.d.Dec)
}
return q
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//
// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
func (_ Quantity) OpenAPISchemaFormat() string { return "" }
// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing
// the OpenAPI v3 spec of this type.
func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} }
// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// - If q.Format is set to BinarySI and q.Amount represents a non-zero value between
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// - Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
}
default:
format = DecimalExponent
}
// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
}
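// Illustrative sketch added during editing (not in the original file): the
// BinarySI fallback described above, shown via String(), which calls
// CanonicalizeBytes internally.
func exampleBinarySIFallback() {
    small := NewMilliQuantity(1500, BinarySI) // 1.5, inside (-1024, 1024)
    fmt.Println(small.String()) // "1500m": emitted as DecimalSI to avoid rounding

    big := NewQuantity(2048, BinarySI)
    fmt.Println(big.String()) // "2Ki": large enough to keep the binary suffix
}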
// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
var base float64
var exponent int
if q.d.Dec != nil {
base, _ = big.NewFloat(0).SetInt(q.d.Dec.UnscaledBig()).Float64()
exponent = int(-q.d.Dec.Scale())
} else {
base = float64(q.i.value)
exponent = int(q.i.scale)
}
if exponent == 0 {
return base
}
return base * math.Pow10(exponent)
}
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
q.s = ""
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
// avoid clearing the string value if we have already calculated it
if q.i.scale >= scale {
return true
}
q.s = ""
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = ""
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
q.s = ""
if q.IsZero() {
q.Format = y.Format
}
if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
return
}
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
if q.d.Dec == nil && y.d.Dec == nil {
return q.i.Cmp(y.i)
}
return q.AsDec().Cmp(y.AsDec())
}
// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
if q.d.Dec != nil {
return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
}
return q.i.Cmp(int64Amount{value: y})
}
// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
q.s = ""
if q.d.Dec == nil {
q.i.value = -q.i.value
return
}
q.d.Dec.Neg(q.d.Dec)
}
// Equal checks equality of two Quantities. This is useful for testing with
// cmp.Equal.
func (q Quantity) Equal(v Quantity) bool {
return q.Cmp(v) == 0
}
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
// String formats the Quantity as a string, caching the result if not calculated.
// String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity.
func (q *Quantity) String() string {
if q == nil {
return "<nil>"
}
if len(q.s) == 0 {
result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result)
number = append(number, suffix...)
q.s = string(number)
}
return q.s
}
// MarshalJSON implements the json.Marshaler interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
if len(q.s) > 0 {
out := make([]byte, len(q.s)+2)
out[0], out[len(out)-1] = '"', '"'
copy(out[1:], q.s)
return out, nil
}
result := make([]byte, int64QuantityExpectedBytes)
result[0] = '"'
number, suffix := q.CanonicalizeBytes(result[1:1])
// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
// the source slice and returning that
if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
number = append(number, suffix...)
number = append(number, '"')
return result[:1+len(number)], nil
}
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
result = append(result, number...)
result = append(result, suffix...)
result = append(result, '"')
return result, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// TODO: Remove support for leading/trailing whitespace
func (q *Quantity) UnmarshalJSON(value []byte) error {
l := len(value)
if l == 4 && bytes.Equal(value, []byte("null")) {
q.d.Dec = nil
q.i = int64Amount{}
return nil
}
if l >= 2 && value[0] == '"' && value[l-1] == '"' {
value = value[1 : l-1]
}
parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
if err != nil {
return err
}
// This copy is safe because parsed will not be referred to again.
*q = parsed
return nil
}
// NewDecimalQuantity returns a new Quantity representing the given
// value in the given format.
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
return &Quantity{
d: infDecAmount{&b},
Format: format,
}
}
// NewQuantity returns a new Quantity representing the given
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value},
Format: format,
}
}
// NewMilliQuantity returns a new Quantity representing the given
// value * 1/1000 in the given format. Note that BinarySI formatting
// will round fractional values, and will be changed to DecimalSI for
// values x where (-1 < x < 1) && (x != 0).
func NewMilliQuantity(value int64, format Format) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: -3},
Format: format,
}
}
// NewScaledQuantity returns a new Quantity representing the given
// value * 10^scale in DecimalSI format.
func NewScaledQuantity(value int64, scale Scale) *Quantity {
return &Quantity{
i: int64Amount{value: value, scale: scale},
Format: DecimalSI,
}
}
// Value returns the unscaled value of q rounded up to the nearest integer away from 0.
func (q *Quantity) Value() int64 {
return q.ScaledValue(0)
}
// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
// if that's a concern, call Value() first to verify the number is small enough.
func (q *Quantity) MilliValue() int64 {
return q.ScaledValue(Milli)
}
// ScaledValue returns the value of ceil(q / 10^scale).
// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000.
// This could overflow an int64.
// To detect overflow, call Value() first and verify the expected magnitude.
func (q *Quantity) ScaledValue(scale Scale) int64 {
if q.d.Dec == nil {
i, _ := q.i.AsScaledInt64(scale)
return i
}
dec := q.d.Dec
return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
}
// Set sets q's value to be value.
func (q *Quantity) Set(value int64) {
q.SetScaled(value, 0)
}
// SetMilli sets q's value to be value * 1/1000.
func (q *Quantity) SetMilli(value int64) {
q.SetScaled(value, Milli)
}
// SetScaled sets q's value to be value * 10^scale
func (q *Quantity) SetScaled(value int64, scale Scale) {
q.s = ""
q.d.Dec = nil
q.i = int64Amount{value: value, scale: scale}
}
// QuantityValue makes it possible to use a Quantity as value for a command
// line parameter.
//
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
type QuantityValue struct {
Quantity
}
// Set implements pflag.Value.Set and Go flag.Value.Set.
func (q *QuantityValue) Set(s string) error {
quantity, err := ParseQuantity(s)
if err != nil {
return err
}
q.Quantity = quantity
return nil
}
// Type implements pflag.Value.Type.
func (q QuantityValue) Type() string {
return "quantity"
} | // parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str) | random_line_split |
generate_tiles.py | import sys
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from datetime import datetime
import click
import rasterio as rio
from rasterio.windows import Window
from tiling.const_stride import ConstStrideTiles
from tiling.const_size import ConstSizeTiles
from dataflow.io_utils import imwrite_rasterio, read_data_rasterio
def get_files_from_folder(input_dir, extensions=None):
"""Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
|
return output
@click.group()
def cli():
pass
def _parse_options(options):
# these imports are needed so that `eval` can resolve names used in option
# values, e.g. 'resampling=Resampling.nearest' or 'dtype=np.float32'
from rasterio.enums import Resampling
import numpy as np
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
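def _example_parse_options():
    """Illustrative sketch added during editing; not part of the original script.

    The --options string is a ';'-separated list of key=value pairs whose values
    are evaluated as Python expressions, so rasterio enums (and numpy dtypes,
    given the imports inside _parse_options) pass straight through to
    read_data_rasterio.
    """
    opts = _parse_options("resampling=Resampling.nearest;nodata_value=-1")
    # opts == {"resampling": Resampling.nearest, "nodata_value": -1}
    return opts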
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check if it exists or has supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
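# Illustrative invocation added during editing (paths and sizes are hypothetical):
#
#   python generate_tiles.py const_stride /data/rasters /data/tiles 512 512 384 384 \
#       --extensions=tif --n_workers=8 --options='resampling=Resampling.nearest'
#
# This writes one "<name>_tiles" folder per input raster, containing 512x512
# tiles extracted every 384 pixels in x and y.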
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstSizeTiles, tile_size=tile_size, min_overlapping=min_overlapping, scale=scale)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
class EmptyBar(object):
def __init__(self, iterable=None, **kwargs):
self.iterable = iterable
def __enter__(self):
return self.iterable
def __exit__(self, *args, **kwargs):
pass
def update(self, n_steps):
pass
cli.add_command(run_const_stride_tiler, name="const_stride")
cli.add_command(run_const_size_tiler, name="const_size")
if __name__ == "__main__":
cli()
| files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()]) | conditional_block |
generate_tiles.py | import sys
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from datetime import datetime
import click
import rasterio as rio
from rasterio.windows import Window
from tiling.const_stride import ConstStrideTiles
from tiling.const_size import ConstSizeTiles
from dataflow.io_utils import imwrite_rasterio, read_data_rasterio
def get_files_from_folder(input_dir, extensions=None):
"""Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()])
return output
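def _example_get_files_from_folder():
    """Illustrative sketch added during editing; the folder path is hypothetical."""
    only_rasters = get_files_from_folder("/data/rasters", extensions=["tif", "jp2"])
    everything = get_files_from_folder("/data/rasters")  # no extension filter
    return only_rasters, everything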
@click.group()
def cli():
pass
def _parse_options(options):
# these imports are needed so that `eval` can resolve names used in option
# values, e.g. 'resampling=Resampling.nearest' or 'dtype=np.float32'
from rasterio.enums import Resampling
import numpy as np
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check if it exists or has supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
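# Illustrative note added during editing (paths are hypothetical): for an input
# file /data/rasters/scene.tif, run_task writes tiles named after their top-left
# pixel offset, e.g. /data/tiles/scene_tiles/tile_0_0.tif, tile_512_0.tif, ...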
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
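# Illustrative invocation (a sketch, not taken from the project's docs; assumes the
# `tiling`/`dataflow` packages are importable so this script can be run directly):
#   python generate_tiles.py const_stride input.tif out_tiles 512 512 256 256 \
#       --n_workers 8 --options='resampling=Resampling.nearest'
# i.e. extract 512x512 tiles every 256 pixels, mirroring the click arguments/options above.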
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstSizeTiles, tile_size=tile_size, min_overlapping=min_overlapping, scale=scale)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
class EmptyBar(object):
def __init__(self, iterable=None, **kwargs):
self.iterable = iterable
def __enter__(self):
return self.iterable
def __exit__(self, *args, **kwargs):
pass
def update(self, n_steps):
pass
cli.add_command(run_const_stride_tiler, name="const_stride") | if __name__ == "__main__":
cli() | cli.add_command(run_const_size_tiler, name="const_size")
| random_line_split |
generate_tiles.py | import sys
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from datetime import datetime
import click
import rasterio as rio
from rasterio.windows import Window
from tiling.const_stride import ConstStrideTiles
from tiling.const_size import ConstSizeTiles
from dataflow.io_utils import imwrite_rasterio, read_data_rasterio
def get_files_from_folder(input_dir, extensions=None):
|
@click.group()
def cli():
pass
def _parse_options(options):
# these imports are needed so that `eval` below can resolve option values
# such as resampling=Resampling.nearest or dtype=np.float32
from rasterio.enums import Resampling
import numpy as np
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
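# Example of the --options format handled above (illustrative): the string is split on ';'
# and each "key=value" pair is eval'ed, so
#   _parse_options("resampling=Resampling.nearest;nodata_value=-1")
# yields roughly {'resampling': Resampling.nearest, 'nodata_value': -1}, which run_task
# forwards as keyword arguments to read_data_rasterio.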
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check that it exists and has a supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
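# Resulting layout (derived from run_task above, paths illustrative): for an input
# /data/scene.tif and output_dir="out", tiles are written as
#   out/scene_tiles/tile_<x>_<y>.<ext>
# where <x>, <y> are the tile's top-left pixel coordinates in the source image and
# <ext> defaults to the input file's extension unless --output_extension is given.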
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstSizeTiles, tile_size=tile_size, min_overlapping=min_overlapping, scale=scale)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
class EmptyBar(object):
def __init__(self, iterable=None, **kwargs):
self.iterable = iterable
def __enter__(self):
return self.iterable
def __exit__(self, *args, **kwargs):
pass
def update(self, n_steps):
pass
cli.add_command(run_const_stride_tiler, name="const_stride")
cli.add_command(run_const_size_tiler, name="const_size")
if __name__ == "__main__":
cli()
| """Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()])
return output | identifier_body |
generate_tiles.py | import sys
from pathlib import Path
from functools import partial
from multiprocessing import Pool
from datetime import datetime
import click
import rasterio as rio
from rasterio.windows import Window
from tiling.const_stride import ConstStrideTiles
from tiling.const_size import ConstSizeTiles
from dataflow.io_utils import imwrite_rasterio, read_data_rasterio
def | (input_dir, extensions=None):
"""Method to get files from a folder with optional filter on extensions
Args:
input_dir: input folder
extensions (list or tuple): List of extensions to filter files (Default value = None)
Returns:
List of filepaths
"""
output = []
if extensions is None:
extensions = [""]
elif not isinstance(extensions, (tuple, list)):
extensions = [extensions]
for ext in extensions:
files = Path(input_dir).rglob("*{}".format(ext))
output.extend([f.as_posix() for f in files if f.is_file()])
return output
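# Usage sketch (hypothetical paths): recursively collect rasters of selected types:
#   files = get_files_from_folder("/data/rasters", ["tif", "jpg"])
# With extensions=None every file found by rglob("*") is returned, and a single string
# extension is accepted as well because it is wrapped into a list above.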
@click.group()
def cli():
pass
def _parse_options(options):
# these imports are needed so that `eval` below can resolve option values
# such as resampling=Resampling.nearest or dtype=np.float32
from rasterio.enums import Resampling
import numpy as np
assert isinstance(options, str), "Options should be a string"
output = {}
if len(options) == 0:
return output
options = options.split(';')
for opt in options:
assert "=" in opt, "Option '{}' should contain '='".format(opt)
k, v = opt.split('=')
output[k] = eval(v)
return output
def run_task(filepath, output_dir, get_tiles_fn, output_extension, options):
try:
src = rio.open(filepath)
except rio.errors.RasterioIOError as e:
raise RuntimeError("Failed to open file: '%s'. Check that it exists and has a supported format." % filepath +
"\nRasterio error message: {}".format(e))
output_tiles_dir = Path(output_dir) / "{}_tiles".format(Path(filepath).stem)
output_tiles_dir.mkdir(exist_ok=True)
tiles = get_tiles_fn((src.width, src.height))
output_extension = Path(filepath).suffix[1:] if output_extension is None else output_extension
kwargs = {}
for extent, out_size in tiles:
x, y, w, h = extent
# get data
tile = read_data_rasterio(src, src_rect=[x, y, w, h],
dst_width=out_size[0],
dst_height=out_size[1],
**_parse_options(options))
kwargs['crs'] = src.crs
if src.transform is not None:
kwargs['transform'] = src.window_transform(Window(x, y, w, h))
output_tile_filepath = output_tiles_dir / ("tile_%i_%i.%s" % (x, y, output_extension))
imwrite_rasterio(output_tile_filepath.as_posix(), tile, **kwargs)
src.close()
def _run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str):
if not quiet:
click.echo(conf_str)
if Path(input_dir_or_file).is_dir():
if extensions is not None:
extensions = extensions.split(",")
files = get_files_from_folder(input_dir_or_file, extensions)
assert len(files) > 0, "No files with extensions '{}' found at '{}'".format(extensions, input_dir_or_file)
else:
files = [input_dir_or_file]
if not Path(output_dir).exists():
if not quiet:
click.echo("Create output folder: %s" % output_dir)
Path(output_dir).mkdir(parents=True)
if not without_log_file:
cmd = sys.argv
now = datetime.now()
log_filepath = Path(output_dir) / ("%s.log" % now.strftime("%Y%m%d_%H%M%S"))
with log_filepath.open('w') as handler:
handler.write("Command:\n")
cmd_str = " ".join(cmd)
handler.write(cmd_str + "\n\n")
handler.write(conf_str + "\n")
func = partial(run_task,
output_dir=output_dir,
get_tiles_fn=get_tiles_fn,
output_extension=output_extension,
options=options)
progressbar = click.progressbar if not quiet else EmptyBar
chunk_size = 10
if n_workers > 1 and len(files) > chunk_size // 2:
with Pool(n_workers) as pool:
with progressbar(length=len(files)) as bar:
for i in range(0, len(files), chunk_size):
chunk_files = files[i: i + chunk_size]
pool.map(func, chunk_files)
bar.update(chunk_size)
else:
with progressbar(files, label='Run tile generator on files') as bar:
for f in bar:
func(f)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('stride', nargs=2, type=int)
@click.option('--origin', nargs=2, type=int, default=(0, 0),
help="Point in pixels in the original image from where to start the tiling. " +
"Values can be positive or negative")
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--without_nodata', type=bool, is_flag=True,
help="Do not include nodata. Default, nodata is included. If nodata is included then tile extents " +
"have all the same size, otherwise tiles at boundaries will be reduced.")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_stride_tiler(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
stride: {}
origin: {}
scale: {}
without_nodata: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, stride, origin, scale, without_nodata, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstStrideTiles, tile_size=tile_size, stride=stride, scale=scale, origin=origin,
include_nodata=not without_nodata)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
@click.command()
@click.argument('input_dir_or_file', type=click.Path(exists=True, file_okay=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.argument('tile_size', nargs=2, type=int)
@click.argument('min_overlapping', type=int)
@click.option('--scale', type=float, default=1.0,
help="Scaling applied to the input image parameters before extracting tile's extent." +
"For example, scale of 0.75 corresponds to a zoom out")
@click.option('--extensions', type=str, default=None,
help="String of file extensions to select (if input is a directory), e.g. 'jpg,png,tif'")
@click.option('--output_extension', type=str, default=None, help="Output tile file extension. " +
"By default, input file extension is taken")
@click.option('--n_workers', default=4, type=int, help="Number of workers in the processing pool [default=4]")
@click.option('--options', type=str, default="", help="Options to pass when reading data with rasterio. " +
"Example --options='resampling=Resampling.nearest;" +
"dtype=np.float32;nodata_value=-1'")
@click.option('-q', '--quiet', is_flag=True, help='Disable verbose mode')
@click.option('--without_log_file', type=bool, is_flag=True,
help="Do not write a log file in the output folder")
def run_const_size_tiler(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, quiet, without_log_file):
conf_str = """
input: {}
output: {}
tile size: {}
min_overlapping: {}
scale: {}
extensions: {}
output_ext: {}
n_workers: {}
options: {}
without_log_file: {}
""".format(input_dir_or_file, output_dir, tile_size, min_overlapping, scale, extensions,
output_extension, n_workers, options, without_log_file)
get_tiles_fn = partial(ConstSizeTiles, tile_size=tile_size, min_overlapping=min_overlapping, scale=scale)
_run_xyz_tiler(get_tiles_fn, input_dir_or_file, output_dir, extensions, output_extension,
n_workers, options, without_log_file, quiet, conf_str)
class EmptyBar(object):
def __init__(self, iterable=None, **kwargs):
self.iterable = iterable
def __enter__(self):
return self.iterable
def __exit__(self, *args, **kwargs):
pass
def update(self, n_steps):
pass
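# Note: EmptyBar above is a no-op stand-in for click.progressbar -- it supports the same
# "with ... as bar" protocol and update() call, so the --quiet path in _run_xyz_tiler
# can reuse the progress-bar-shaped code without printing anything.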
cli.add_command(run_const_stride_tiler, name="const_stride")
cli.add_command(run_const_size_tiler, name="const_size")
if __name__ == "__main__":
cli()
| get_files_from_folder | identifier_name |
gridsummary.js | Ext.ns('Ext.ux.grid');
Ext.ux.grid.GridSummary = function(config) {
Ext.apply(this, config);
};
Ext.extend(Ext.ux.grid.GridSummary, Ext.util.Observable, {
// configurable scrollbar width (used only in the event the Ext.getScrollBarWidth() method is not available)
scrollBarWidth : 17,
// private
init : function(grid) {
var v = grid.getView();
Ext.apply(this, {
grid : grid,
view : v
});
// override GridView's onLayout() method
v.onLayout = this.onLayout;
// IE6/7 disappearing vertical scrollbar workaround
if (Ext.isIE6 || Ext.isIE7) {
if (!grid.events['viewready']) {
// check for "viewready" event on GridPanel -- this event is only available in Ext 3.x,
// so the plugin hotwires it in if it doesn't exist
v.afterMethod('afterRender', function() {
this.grid.fireEvent('viewready', this.grid);
}, this);
}
// a small (hacky) delay of ~10ms is required to prevent
// the vertical scrollbar from disappearing in IE6/7
grid.on('viewready', function() { |
v.afterMethod('render', this.refreshSummary, this);
v.afterMethod('refresh', this.refreshSummary, this);
v.afterMethod('onColumnWidthUpdated', this.doWidth, this);
v.afterMethod('onAllColumnWidthsUpdated', this.doAllWidths, this);
v.afterMethod('onColumnHiddenUpdated', this.doHidden, this);
grid.on('columnresize', this.refreshSummary, this);
grid.on('columnmove', this.refreshSummary, this);
grid.getColumnModel().on('hiddenchange', this.refreshSummary, this);
grid.on('resize', this.refreshSummary, this);
if (Ext.isGecko || Ext.isOpera) {
// restore gridview's horizontal scroll position when store data is changed
//
// TODO -- when sorting a column in Opera, the summary row's horizontal scroll position is
// synced with the gridview, but is displaced 1 vertical scrollbar width to the right
v.afterMethod('onDataChange', this.restoreGridHScroll, this);
}
grid.on({
bodyscroll : this.syncSummaryScroll,
beforedestroy : this.beforeDestroy,
scope : this
});
// update summary row on store's add/remove/clear/update events
grid.store.on({
add : this.refreshSummary,
remove : this.refreshSummary,
clear : this.refreshSummary,
update : this.refreshSummary,
scope : this
});
if (!this.rowTpl) {
this.rowTpl = new Ext.Template(
'<div class="x-grid3-summary-row x-grid3-gridsummary-row-offset">',
'<table class="x-grid3-summary-table" border="0" cellspacing="0" cellpadding="0" style="{tstyle}">',
'<tbody><tr>{cells}</tr></tbody>',
'</table>',
'</div>'
);
this.rowTpl.disableFormats = true;
}
this.rowTpl.compile();
if (!this.cellTpl) {
this.cellTpl = new Ext.Template(
'<td class="x-grid3-col x-grid3-cell x-grid3-td-{id} {css}" style="{style}">',
'<div class="x-grid3-cell-inner x-grid3-col-{id}" unselectable="on" {attr}>{value}</div>',
"</td>"
);
this.cellTpl.disableFormats = true;
}
this.cellTpl.compile();
},
// private
calculate : function(rs, cm) {
var data = {},
cfg = cm.config,
i, len, cf, cname, j, jlen, r;
for (i = 0, len = cfg.length; i < len; i++) { // loop through all columns in ColumnModel
cf = cfg[i]; // get column's configuration
cname = cf.dataIndex; // get column dataIndex
// initialise grid summary row data for
// the current column being worked on
data[cname] = 0;
if (cf.summaryType) {
for (j = 0, jlen = rs.length; j < jlen; j++) {
r = rs[j]; // get a single Record
data[cname] = Ext.ux.grid.GridSummary.Calculations[cf.summaryType](r.get(cname), r, cname, data, j);
}
}
}
return data;
},
// private
onLayout : function(vw, vh) { // note: this method is scoped to the GridView
if (typeof(vh) != 'number') { // handles grid's height:'auto' config
return;
}
if (!this.grid.getGridEl().hasClass('x-grid3-hide-gridsummary')) {
// readjust gridview's height only if grid summary row is visible
this.scroller.setHeight(vh - this.summaryWrap.getHeight());
}
},
// private
syncScroll : function(refEl, scrollEl, currX, currY) {
currX = currX || refEl.scrollLeft;
currY = currY || refEl.scrollTop;
if (this.oldX != currX) { // only adjust horizontal scroll when horizontal scroll is detected
scrollEl.scrollLeft = currX;
scrollEl.scrollLeft = currX; // second time for IE (1/2 the time first call fails. other browsers simply ignore repeated calls)
}
// remember current scroll position
this.oldX = currX;
this.oldY = currY;
},
// private
syncSummaryScroll : function(currX, currY) {
var v = this.view,
y = this.oldY;
if (
// workaround for Gecko's horizontal-scroll reset bug
// (see unresolved mozilla bug: https://bugzilla.mozilla.org/show_bug.cgi?id=386444
// "using vertical scrollbar changes horizontal scroll position with overflow-x:hidden and overflow-y:scroll")
Ext.isGecko && // 1) <div>s with overflow-x:hidden have their DOM.scrollLeft property set to 0 when scrolling vertically
currX === 0 && // 2) current x-ordinate is now zero
this.oldX > 0 && // 3) gridview is not at x=0 ordinate
(y !== currY || y === 0) // 4) vertical scroll detected / vertical scrollbar is moved rapidly all the way to the top
) {
this.restoreGridHScroll();
} else {
this.syncScroll(v.scroller.dom, v.summaryWrap.dom, currX, currY);
}
},
// private
restoreGridHScroll : function() {
// restore gridview's original x-ordinate
// (note: this causes an unavoidable flicker in the gridview)
this.view.scroller.dom.scrollLeft = this.oldX || 0;
},
// private
syncGridHScroll : function() {
var v = this.view;
this.syncScroll(v.summaryWrap.dom, v.scroller.dom);
},
// private
doWidth : function(col, w, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild;
fc.style.width = tw;
fc.rows[0].childNodes[col].style.width = w;
this.updateSummaryWidth();
},
// private
doAllWidths : function(ws, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
cells = fc.rows[0].childNodes,
wlen = ws.length,
j;
fc.style.width = tw;
for (j = 0; j < wlen; j++) {
cells[j].style.width = ws[j];
}
this.updateSummaryWidth();
},
// private
doHidden : function(col, hidden, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
display = hidden ? 'none' : '';
fc.style.width = tw;
fc.rows[0].childNodes[col].style.display = display;
this.updateSummaryWidth();
},
// private
getGridHeader : function() {
if (!this.gridHeader) {
this.gridHeader = this.view.mainHd.child('.x-grid3-header-offset');
}
return this.gridHeader;
},
// private
updateSummaryWidth : function() {
// all browsers add a 1 pixel space between the edges of the vert. and hori. scrollbars,
// so subtract one from the grid header width before setting the summary row's width
//kirov this.getSummaryNode().setWidth(this.getGridHeader().getWidth() - 1);
if (this.getSummaryNode()) {
this.getSummaryNode().setWidth(this.view.getTotalWidth()); //kirov
}
// kirov
if (Ext.isIE) {
var elWidth = this.grid.getGridEl().getSize().width;
if (this.grid.getColumnModel().getTotalWidth()+this.view.getScrollOffset() > elWidth){
//console.log('scroll');
//debugger;
this.view.summaryWrap.dom.style['overflow-y'] = 'hidden';
this.view.summaryWrap.setHeight(((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth) + 18 /* 18 = row-expander height */));
} else {
this.view.summaryWrap.dom.style['overflow-y'] = 'visible';
this.view.summaryWrap.setHeight((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth));
}
}
},
// private
renderSummary : function(o, cs, cm) {
cs = cs || this.view.getColumnData();
var cfg = cm.config,
buf = [],
last = cs.length - 1,
i, len, c, cf, p;
for (i = 0, len = cs.length; i < len; i++) {
c = cs[i];
cf = cfg[i];
p = {};
p.id = c.id;
p.style = c.style;
p.css = i === 0 ? 'x-grid3-cell-first ' : (i == last ? 'x-grid3-cell-last ' : '');
if (cf.summaryType || cf.summaryRenderer) {
p.value = (cf.summaryRenderer || c.renderer)(o.data[c.name], p, o);
} else {
p.value = '';
}
if (p.value === undefined || p.value === "") {
p.value = " ";
}
buf[buf.length] = this.cellTpl.apply(p);
}
return this.rowTpl.apply({
tstyle: 'width:' + this.view.getTotalWidth() + ';',
cells: buf.join('')
});
},
// private
refreshSummary : function() {
var g = this.grid,
ds = g.store,
cs = this.view.getColumnData(),
cm = g.getColumnModel(),
rs = ds.getRange(),
data = this.calculate(rs, cm),
buf = this.renderSummary({data: data}, cs, cm);
if (!this.view.summaryWrap) {
this.view.summaryWrap = Ext.DomHelper.insertAfter(this.view.scroller, {
// IE6/7/8 style hacks:
// - width:100% required for horizontal scroll to appear (all the time for IE6/7, only in GroupingView for IE8)
// - explicit height required for summary row to appear (only for IE6/7, no effect in IE8)
// - overflow-y:hidden required to hide vertical scrollbar in summary row (only for IE6/7, no effect in IE8)
style : 'overflow:auto;' + (Ext.isIE ? 'width:100%;overflow-y:hidden;height:' + ((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth) + 18 /* 18 = row-expander height */) + 'px;' : ''),
tag : 'div',
cls : 'x-grid3-gridsummary-row-inner'
}, true);
// synchronise GridView's and GridSummary's horizontal scroll
this.view.summaryWrap.on('scroll', this.syncGridHScroll, this);
}
// update summary row data
this.setSummaryNode(this.view.summaryWrap.update(buf).first());
this.updateSummaryWidth();
},
// private
toggleGridHScroll : function(allowHScroll) {
// toggle GridView's horizontal scrollbar
//kirov
if (allowHScroll){
this.view.scroller.dom.style.overflow = 'auto';
} else {
// hide only the horizontal scrollbar, which would otherwise overlap the summary row;
// the vertical scrollbar must stay so the grid records can still be scrolled
this.view.scroller.dom.style.overflowX = "hidden";
}
this.view.scroller[allowHScroll === undefined ? 'toggleClass' : allowHScroll ? 'removeClass' : 'addClass']('x-grid3-gridsummary-hide-hscroll');
},
// show/hide summary row
toggleSummary : function(visible) { // true to display summary row
var el = this.grid.getGridEl(),
v = this.view;
if (el) {
el[visible === undefined ? 'toggleClass' : visible ? 'removeClass' : 'addClass']('x-grid3-hide-gridsummary');
// toggle gridview's horizontal scrollbar
this.toggleGridHScroll();
// readjust gridview height
v.layout();
// sync summary row scroll position
v.summaryWrap.dom.scrollLeft = v.scroller.dom.scrollLeft;
}
},
// get summary row Element
getSummaryNode : function() {
return this.view.summary;
},
// private
setSummaryNode : function(sn) {
this.view.summary = sn;
},
// private
beforeDestroy : function() {
Ext.destroy(
this.view.summary,
this.view.summaryWrap
);
delete this.grid;
delete this.view;
delete this.gridHeader;
delete this.oldX;
delete this.oldY;
}
});
Ext.reg('gridsummary', Ext.ux.grid.GridSummary);
/*
* all Calculation methods are called on each Record in the Store
* with the following 5 parameters:
*
* v - cell value
* record - reference to the current Record
* colName - column name (i.e. the ColumnModel's dataIndex)
* data - the cumulative data for the current column + summaryType up to the current Record
* rowIdx - current row index
*/
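/*
 * Illustrative column definition (a sketch; the exact grid config lives in application code):
 * a column opts into one of the calculations below via its summaryType, and may override the
 * rendering of the computed value with summaryRenderer, e.g.
 * {
 *     header: 'Price', dataIndex: 'price',
 *     summaryType: 'sum',
 *     summaryRenderer: Ext.util.Format.usMoney // assumed standard Ext 3 formatter
 * }
 */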
Ext.ux.grid.GridSummary.Calculations = {
sum : function(v, record, colName, data, rowIdx) {
return data[colName] + Ext.num(v, 0);
},
count : function(v, record, colName, data, rowIdx) {
return rowIdx + 1;
},
max : function(v, record, colName, data, rowIdx) {
return Math.max(Ext.num(v, 0), data[colName]);
},
min : function(v, record, colName, data, rowIdx) {
return Math.min(Ext.num(v, 0), data[colName]);
},
average : function(v, record, colName, data, rowIdx) {
var t = data[colName] + Ext.num(v, 0),
count = record.store.getCount();
return rowIdx == count - 1 ? (t / count) : t;
}
}; | this.toggleGridHScroll(false);
}, this, { delay: 10 });
} else {
v.afterMethod('render', this.toggleGridHScroll, this);
} | random_line_split |
gridsummary.js | Ext.ns('Ext.ux.grid');
Ext.ux.grid.GridSummary = function(config) {
Ext.apply(this, config);
};
Ext.extend(Ext.ux.grid.GridSummary, Ext.util.Observable, {
// configurable scrollbar width (used only in the event the Ext.getScrollBarWidth() method is not available)
scrollBarWidth : 17,
// private
init : function(grid) {
var v = grid.getView();
Ext.apply(this, {
grid : grid,
view : v
});
// override GridView's onLayout() method
v.onLayout = this.onLayout;
// IE6/7 disappearing vertical scrollbar workaround
if (Ext.isIE6 || Ext.isIE7) {
if (!grid.events['viewready']) {
// check for "viewready" event on GridPanel -- this event is only available in Ext 3.x,
// so the plugin hotwires it in if it doesn't exist
v.afterMethod('afterRender', function() {
this.grid.fireEvent('viewready', this.grid);
}, this);
}
// a small (hacky) delay of ~10ms is required to prevent
// the vertical scrollbar from disappearing in IE6/7
grid.on('viewready', function() {
this.toggleGridHScroll(false);
}, this, { delay: 10 });
} else {
v.afterMethod('render', this.toggleGridHScroll, this);
}
v.afterMethod('render', this.refreshSummary, this);
v.afterMethod('refresh', this.refreshSummary, this);
v.afterMethod('onColumnWidthUpdated', this.doWidth, this);
v.afterMethod('onAllColumnWidthsUpdated', this.doAllWidths, this);
v.afterMethod('onColumnHiddenUpdated', this.doHidden, this);
grid.on('columnresize', this.refreshSummary, this);
grid.on('columnmove', this.refreshSummary, this);
grid.getColumnModel().on('hiddenchange', this.refreshSummary, this);
grid.on('resize', this.refreshSummary, this);
if (Ext.isGecko || Ext.isOpera) {
// restore gridview's horizontal scroll position when store data is changed
//
// TODO -- when sorting a column in Opera, the summary row's horizontal scroll position is
// synced with the gridview, but is displaced 1 vertical scrollbar width to the right
v.afterMethod('onDataChange', this.restoreGridHScroll, this);
}
grid.on({
bodyscroll : this.syncSummaryScroll,
beforedestroy : this.beforeDestroy,
scope : this
});
// update summary row on store's add/remove/clear/update events
grid.store.on({
add : this.refreshSummary,
remove : this.refreshSummary,
clear : this.refreshSummary,
update : this.refreshSummary,
scope : this
});
if (!this.rowTpl) {
this.rowTpl = new Ext.Template(
'<div class="x-grid3-summary-row x-grid3-gridsummary-row-offset">',
'<table class="x-grid3-summary-table" border="0" cellspacing="0" cellpadding="0" style="{tstyle}">',
'<tbody><tr>{cells}</tr></tbody>',
'</table>',
'</div>'
);
this.rowTpl.disableFormats = true;
}
this.rowTpl.compile();
if (!this.cellTpl) {
this.cellTpl = new Ext.Template(
'<td class="x-grid3-col x-grid3-cell x-grid3-td-{id} {css}" style="{style}">',
'<div class="x-grid3-cell-inner x-grid3-col-{id}" unselectable="on" {attr}>{value}</div>',
"</td>"
);
this.cellTpl.disableFormats = true;
}
this.cellTpl.compile();
},
// private
calculate : function(rs, cm) {
var data = {},
cfg = cm.config,
i, len, cf, cname, j, jlen, r;
for (i = 0, len = cfg.length; i < len; i++) { // loop through all columns in ColumnModel
cf = cfg[i]; // get column's configuration
cname = cf.dataIndex; // get column dataIndex
// initialise grid summary row data for
// the current column being worked on
data[cname] = 0;
if (cf.summaryType) {
for (j = 0, jlen = rs.length; j < jlen; j++) {
r = rs[j]; // get a single Record
data[cname] = Ext.ux.grid.GridSummary.Calculations[cf.summaryType](r.get(cname), r, cname, data, j);
}
}
}
return data;
},
// private
onLayout : function(vw, vh) { // note: this method is scoped to the GridView
if (typeof(vh) != 'number') |
if (!this.grid.getGridEl().hasClass('x-grid3-hide-gridsummary')) {
// readjust gridview's height only if grid summary row is visible
this.scroller.setHeight(vh - this.summaryWrap.getHeight());
}
},
// private
syncScroll : function(refEl, scrollEl, currX, currY) {
currX = currX || refEl.scrollLeft;
currY = currY || refEl.scrollTop;
if (this.oldX != currX) { // only adjust horizontal scroll when horizontal scroll is detected
scrollEl.scrollLeft = currX;
scrollEl.scrollLeft = currX; // second time for IE (1/2 the time first call fails. other browsers simply ignore repeated calls)
}
// remember current scroll position
this.oldX = currX;
this.oldY = currY;
},
// private
syncSummaryScroll : function(currX, currY) {
var v = this.view,
y = this.oldY;
if (
// workaround for Gecko's horizontal-scroll reset bug
// (see unresolved mozilla bug: https://bugzilla.mozilla.org/show_bug.cgi?id=386444
// "using vertical scrollbar changes horizontal scroll position with overflow-x:hidden and overflow-y:scroll")
Ext.isGecko && // 1) <div>s with overflow-x:hidden have their DOM.scrollLeft property set to 0 when scrolling vertically
currX === 0 && // 2) current x-ordinate is now zero
this.oldX > 0 && // 3) gridview is not at x=0 ordinate
(y !== currY || y === 0) // 4) vertical scroll detected / vertical scrollbar is moved rapidly all the way to the top
) {
this.restoreGridHScroll();
} else {
this.syncScroll(v.scroller.dom, v.summaryWrap.dom, currX, currY);
}
},
// private
restoreGridHScroll : function() {
// restore gridview's original x-ordinate
// (note: this causes an unavoidable flicker in the gridview)
this.view.scroller.dom.scrollLeft = this.oldX || 0;
},
// private
syncGridHScroll : function() {
var v = this.view;
this.syncScroll(v.summaryWrap.dom, v.scroller.dom);
},
// private
doWidth : function(col, w, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild;
fc.style.width = tw;
fc.rows[0].childNodes[col].style.width = w;
this.updateSummaryWidth();
},
// private
doAllWidths : function(ws, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
cells = fc.rows[0].childNodes,
wlen = ws.length,
j;
fc.style.width = tw;
for (j = 0; j < wlen; j++) {
cells[j].style.width = ws[j];
}
this.updateSummaryWidth();
},
// private
doHidden : function(col, hidden, tw) {
var s = this.getSummaryNode(),
fc = s.dom.firstChild,
display = hidden ? 'none' : '';
fc.style.width = tw;
fc.rows[0].childNodes[col].style.display = display;
this.updateSummaryWidth();
},
// private
getGridHeader : function() {
if (!this.gridHeader) {
this.gridHeader = this.view.mainHd.child('.x-grid3-header-offset');
}
return this.gridHeader;
},
// private
updateSummaryWidth : function() {
// all browsers add a 1 pixel space between the edges of the vert. and hori. scrollbars,
// so subtract one from the grid header width before setting the summary row's width
//kirov this.getSummaryNode().setWidth(this.getGridHeader().getWidth() - 1);
if (this.getSummaryNode()) {
this.getSummaryNode().setWidth(this.view.getTotalWidth()); //kirov
}
// kirov
if (Ext.isIE) {
var elWidth = this.grid.getGridEl().getSize().width;
if (this.grid.getColumnModel().getTotalWidth()+this.view.getScrollOffset() > elWidth){
//console.log('scroll');
//debugger;
this.view.summaryWrap.dom.style['overflow-y'] = 'hidden';
this.view.summaryWrap.setHeight(((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth) + 18 /* 18 = row-expander height */));
} else {
this.view.summaryWrap.dom.style['overflow-y'] = 'visible';
this.view.summaryWrap.setHeight((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth));
}
}
},
// private
renderSummary : function(o, cs, cm) {
cs = cs || this.view.getColumnData();
var cfg = cm.config,
buf = [],
last = cs.length - 1,
i, len, c, cf, p;
for (i = 0, len = cs.length; i < len; i++) {
c = cs[i];
cf = cfg[i];
p = {};
p.id = c.id;
p.style = c.style;
p.css = i === 0 ? 'x-grid3-cell-first ' : (i == last ? 'x-grid3-cell-last ' : '');
if (cf.summaryType || cf.summaryRenderer) {
p.value = (cf.summaryRenderer || c.renderer)(o.data[c.name], p, o);
} else {
p.value = '';
}
if (p.value === undefined || p.value === "") {
p.value = " ";
}
buf[buf.length] = this.cellTpl.apply(p);
}
return this.rowTpl.apply({
tstyle: 'width:' + this.view.getTotalWidth() + ';',
cells: buf.join('')
});
},
// private
refreshSummary : function() {
var g = this.grid,
ds = g.store,
cs = this.view.getColumnData(),
cm = g.getColumnModel(),
rs = ds.getRange(),
data = this.calculate(rs, cm),
buf = this.renderSummary({data: data}, cs, cm);
if (!this.view.summaryWrap) {
this.view.summaryWrap = Ext.DomHelper.insertAfter(this.view.scroller, {
// IE6/7/8 style hacks:
// - width:100% required for horizontal scroll to appear (all the time for IE6/7, only in GroupingView for IE8)
// - explicit height required for summary row to appear (only for IE6/7, no effect in IE8)
// - overflow-y:hidden required to hide vertical scrollbar in summary row (only for IE6/7, no effect in IE8)
style : 'overflow:auto;' + (Ext.isIE ? 'width:100%;overflow-y:hidden;height:' + ((Ext.getScrollBarWidth ? Ext.getScrollBarWidth() : this.scrollBarWidth) + 18 /* 18 = row-expander height */) + 'px;' : ''),
tag : 'div',
cls : 'x-grid3-gridsummary-row-inner'
}, true);
// synchronise GridView's and GridSummary's horizontal scroll
this.view.summaryWrap.on('scroll', this.syncGridHScroll, this);
}
// update summary row data
this.setSummaryNode(this.view.summaryWrap.update(buf).first());
this.updateSummaryWidth();
},
// private
toggleGridHScroll : function(allowHScroll) {
// toggle GridView's horizontal scrollbar
//kirov
if (allowHScroll){
this.view.scroller.dom.style.overflow = 'auto';
} else {
// hide only the horizontal scrollbar, which would otherwise overlap the summary row;
// the vertical scrollbar must stay so the grid records can still be scrolled
this.view.scroller.dom.style.overflowX = "hidden";
}
this.view.scroller[allowHScroll === undefined ? 'toggleClass' : allowHScroll ? 'removeClass' : 'addClass']('x-grid3-gridsummary-hide-hscroll');
},
// show/hide summary row
toggleSummary : function(visible) { // true to display summary row
var el = this.grid.getGridEl(),
v = this.view;
if (el) {
el[visible === undefined ? 'toggleClass' : visible ? 'removeClass' : 'addClass']('x-grid3-hide-gridsummary');
// toggle gridview's horizontal scrollbar
this.toggleGridHScroll();
// readjust gridview height
v.layout();
// sync summary row scroll position
v.summaryWrap.dom.scrollLeft = v.scroller.dom.scrollLeft;
}
},
// get summary row Element
getSummaryNode : function() {
return this.view.summary;
},
// private
setSummaryNode : function(sn) {
this.view.summary = sn;
},
// private
beforeDestroy : function() {
Ext.destroy(
this.view.summary,
this.view.summaryWrap
);
delete this.grid;
delete this.view;
delete this.gridHeader;
delete this.oldX;
delete this.oldY;
}
});
Ext.reg('gridsummary', Ext.ux.grid.GridSummary);
/*
* all Calculation methods are called on each Record in the Store
* with the following 5 parameters:
*
* v - cell value
* record - reference to the current Record
* colName - column name (i.e. the ColumnModel's dataIndex)
* data - the cumulative data for the current column + summaryType up to the current Record
* rowIdx - current row index
*/
Ext.ux.grid.GridSummary.Calculations = {
sum : function(v, record, colName, data, rowIdx) {
return data[colName] + Ext.num(v, 0);
},
count : function(v, record, colName, data, rowIdx) {
return rowIdx + 1;
},
max : function(v, record, colName, data, rowIdx) {
return Math.max(Ext.num(v, 0), data[colName]);
},
min : function(v, record, colName, data, rowIdx) {
return Math.min(Ext.num(v, 0), data[colName]);
},
average : function(v, record, colName, data, rowIdx) {
var t = data[colName] + Ext.num(v, 0),
count = record.store.getCount();
return rowIdx == count - 1 ? (t / count) : t;
}
};
| { // handles grid's height:'auto' config
return;
} | conditional_block |
output.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use crate::channel::eventio::EventsBuffer;
use crate::channel::tee::{WrappedTee, Tee};
use crate::common::Port;
/// Describes how dataset tags are changed when data is emitted from an output port.
///
/// # Please note!
/// Since we have only a few built-in operators that manipulate dataset tags, for
/// simplicity `OutputDelta` is defined per output port and applies against all inputs.
/// For example, when generating an output dataset, if a binary operator advances the
/// dataset tag of one input, it will (and must) do the same for the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of tag, usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension for tag, usually the EnterScope operator.
ToChild,
/// Remove current dimension of tag, usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
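// Rough sketch of how the deltas are read elsewhere in this file (illustrative only,
// based on `matcher_of` above and `OutputHandle::match_output` below): with
// `OutputDelta::ToChild`, data emitted under tag `t` is re-tagged `Tag::from(&t, 0)`
// (one extra dimension), and `matcher_of(&child_tag)` maps the child tag back to its
// parent to locate the matching input; `Advance` bumps the loop counter
// (`tag.advance()` / `tag.retreat()`), and `ToParent` drops the innermost dimension.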
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned bool indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else |
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
///
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
Ok(())
}
#[inline]
fn push(&mut self, msg: D) -> IOResult<()> {
self.buffer.push(msg);
if self.buffer.len() == self.batch_size {
self.flush()?;
}
Ok(())
}
/// Decrease the output channel's remaining capacity by `decr`; returns `true` if capacity is left.
#[inline]
fn update_capacity(&mut self, decr: usize) -> bool {
if let Some(ref mut ca) = self.capacity {
if **ca <= decr {
**ca = 0;
false
} else {
**ca -= decr;
true
}
} else {
true
}
}
/// Return `true` if there is capacity left;
#[inline]
fn check_capacity(&self) -> bool {
self.capacity.as_ref().map(|ca| **ca > 0).unwrap_or(true)
}
}
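// Typical operator-side usage (sketch; `output` is an OutputHandle<D> and `tag` is the
// incoming dataset's tag -- names are illustrative):
// let mut session = output.session(&tag);
// for item in produced_items { session.give(item)?; }
// session.transmit_end()?; // or simply drop the session: the Drop impl below flushes the buffer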
impl<'a, D: Data> Drop for Session<'a, D> {
fn drop(&mut self) {
match self.flush() {
Ok(_) => (),
Err(e) => {
error!("Session flush failed, caused by {:?}", e);
}
}
}
}
impl<D: Data> OutputHandle<D> {
#[inline]
pub fn session(&mut self, tag: &Tag) -> Session<D> {
let matched = self.match_output(tag).unwrap_or(tag.clone());
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
#[inline]
pub fn session_of<T: Into<Tag>>(&mut self, tag: T) -> Session<D> {
let t = tag.into();
let matched = self.match_output(&t).unwrap_or(t);
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
}
| {
for item in iter {
self.push(item)?;
}
Ok(true)
} | conditional_block |
output.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use crate::channel::eventio::EventsBuffer;
use crate::channel::tee::{WrappedTee, Tee};
use crate::common::Port;
/// Describes how dataset tags are changed when data is emitted from an output port.
///
/// # Please note!
/// Since we have only a few built-in operators that manipulate dataset tags, for
/// simplicity `OutputDelta` is defined per output port and applies against all inputs.
/// For example, when generating an output dataset, if a binary operator advances the
/// dataset tag of one input, it will (and must) do the same for the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of tag, usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension for tag, usually the EnterScope operator.
ToChild,
/// Remove current dimension of tag, usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
/// Output a whole batch at once: any buffered messages are flushed first, then the batch is pushed.
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
Ok(())
}
#[inline]
fn push(&mut self, msg: D) -> IOResult<()> {
self.buffer.push(msg);
if self.buffer.len() == self.batch_size {
self.flush()?;
}
Ok(())
}
/// Decrease the remaining capacity of the output channel by `decr`; returns `true` while capacity remains (always `true` when no capacity limit is set).
#[inline]
fn update_capacity(&mut self, decr: usize) -> bool {
if let Some(ref mut ca) = self.capacity {
if **ca <= decr {
**ca = 0;
false
} else {
**ca -= decr;
true
}
} else {
true
}
}
/// Return `true` if there is capacity left;
#[inline]
fn check_capacity(&self) -> bool {
self.capacity.as_ref().map(|ca| **ca > 0).unwrap_or(true)
}
}
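// Added usage sketch (hedged; not part of the original source): the intended
// `Session` flow from an operator body -- open a session for a tag on an
// `OutputHandle`, emit until the channel capacity is exhausted, then signal the
// end of this tag's stream. All types come from the surrounding module.
#[allow(dead_code)]
fn session_usage_example<D: Data>(
    output: &mut OutputHandle<D>,
    tag: &Tag,
    items: Vec<D>,
) -> IOResult<()> {
    let mut session = output.session(tag);
    for item in items {
        // `give` returns whether output capacity is still available after this message.
        if !session.give(item)? {
            break;
        }
    }
    session.transmit_end()
}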
impl<'a, D: Data> Drop for Session<'a, D> {
fn drop(&mut self) {
match self.flush() {
Ok(_) => (),
Err(e) => {
error!("Session flush failed, caused by {:?}", e);
}
}
}
}
impl<D: Data> OutputHandle<D> {
#[inline]
pub fn session(&mut self, tag: &Tag) -> Session<D> {
let matched = self.match_output(tag).unwrap_or(tag.clone());
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
#[inline]
pub fn session_of<T: Into<Tag>>(&mut self, tag: T) -> Session<D> {
let t = tag.into();
let matched = self.match_output(&t).unwrap_or(t);
let ca = self.capacity.as_mut(); | } | Session::new(&mut self.inner, matched, self.batch_size, ca)
} | random_line_split |
output.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use crate::channel::eventio::EventsBuffer;
use crate::channel::tee::{WrappedTee, Tee};
use crate::common::Port;
/// Describes how dataset tags are changed when data is output from an output port.
///
/// # Please note!
/// Since only a few built-in operators manipulate the dataset tag, for simplicity
/// `OutputDelta` is defined per output port and applies to all inputs alike.
/// For example, if a binary operator advances the dataset tag of one input when
/// generating its output dataset, it will and must do the same to the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of tag, usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension for tag, usually the EnterScope operator.
ToChild,
/// Remove current dimension of tag, usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> |
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
/// Output a whole batch at once: any buffered messages are flushed first, then the batch is pushed.
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
Ok(())
}
#[inline]
fn push(&mut self, msg: D) -> IOResult<()> {
self.buffer.push(msg);
if self.buffer.len() == self.batch_size {
self.flush()?;
}
Ok(())
}
/// Decrease the remaining capacity of the output channel by `decr`; returns `true` while capacity remains (always `true` when no capacity limit is set).
#[inline]
fn update_capacity(&mut self, decr: usize) -> bool {
if let Some(ref mut ca) = self.capacity {
if **ca <= decr {
**ca = 0;
false
} else {
**ca -= decr;
true
}
} else {
true
}
}
/// Return `true` if there is capacity left;
#[inline]
fn check_capacity(&self) -> bool {
self.capacity.as_ref().map(|ca| **ca > 0).unwrap_or(true)
}
}
impl<'a, D: Data> Drop for Session<'a, D> {
fn drop(&mut self) {
match self.flush() {
Ok(_) => (),
Err(e) => {
error!("Session flush failed, caused by {:?}", e);
}
}
}
}
impl<D: Data> OutputHandle<D> {
#[inline]
pub fn session(&mut self, tag: &Tag) -> Session<D> {
let matched = self.match_output(tag).unwrap_or(tag.clone());
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
#[inline]
pub fn session_of<T: Into<Tag>>(&mut self, tag: T) -> Session<D> {
let t = tag.into();
let matched = self.match_output(&t).unwrap_or(t);
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
}
| {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
} | identifier_body |
output.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use crate::channel::eventio::EventsBuffer;
use crate::channel::tee::{WrappedTee, Tee};
use crate::common::Port;
/// Describes how dataset tags are changed when data is output from an output port.
///
/// # Please note!
/// Since only a few built-in operators manipulate the dataset tag, for simplicity
/// `OutputDelta` is defined per output port and applies to all inputs alike.
/// For example, if a binary operator advances the dataset tag of one input when
/// generating its output dataset, it will and must do the same to the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of tag, usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension for tag, usually the EnterScope operator.
ToChild,
/// Remove current dimension of tag, usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn | (&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing ...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean indicates whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
/// Output a whole batch at once: any buffered messages are flushed first, then the batch is pushed.
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
Ok(())
}
#[inline]
fn push(&mut self, msg: D) -> IOResult<()> {
self.buffer.push(msg);
if self.buffer.len() == self.batch_size {
self.flush()?;
}
Ok(())
}
/// Decrease the remaining capacity of the output channel by `decr`; returns `true` while capacity remains (always `true` when no capacity limit is set).
#[inline]
fn update_capacity(&mut self, decr: usize) -> bool {
if let Some(ref mut ca) = self.capacity {
if **ca <= decr {
**ca = 0;
false
} else {
**ca -= decr;
true
}
} else {
true
}
}
/// Return `true` if there is capacity left;
#[inline]
fn check_capacity(&self) -> bool {
self.capacity.as_ref().map(|ca| **ca > 0).unwrap_or(true)
}
}
impl<'a, D: Data> Drop for Session<'a, D> {
fn drop(&mut self) {
match self.flush() {
Ok(_) => (),
Err(e) => {
error!("Session flush failed, caused by {:?}", e);
}
}
}
}
impl<D: Data> OutputHandle<D> {
#[inline]
pub fn session(&mut self, tag: &Tag) -> Session<D> {
let matched = self.match_output(tag).unwrap_or(tag.clone());
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
#[inline]
pub fn session_of<T: Into<Tag>>(&mut self, tag: T) -> Session<D> {
let t = tag.into();
let matched = self.match_output(&t).unwrap_or(t);
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
}
| as_any_mut | identifier_name |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use the signed distance function (SDF) paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
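// Added illustration (hedged; not in the original source): how the SDF pieces
// compose. The distance to a compound shape is the smooth union of the distances
// to its primitives, which is what `Shape::sample` below does with many random
// capsules. The endpoints, radii and blend factor here are arbitrary example values.
#[allow(dead_code)]
fn two_capsules_sdf_example(p: Vector3<f32>) -> f32 {
    let d1 = sd_capsule(
        p,
        Vector3::new(0.3, 0.3, 0.3),
        Vector3::new(0.7, 0.3, 0.3),
        0.1,
    );
    let d2 = sd_capsule(
        p,
        Vector3::new(0.3, 0.7, 0.3),
        Vector3::new(0.7, 0.7, 0.3),
        0.1,
    );
    // k controls how softly the two capsules blend into one another.
    d1.smooth_union(0.05, d2)
}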
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
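// Added mini-example (hedged; not in the original source): the extractor fills
// `vertices` as a flat [x0, y0, z0, x1, y1, z1, ...] buffer and `indices` with
// three vertex indices per face; this shows a single triangle going through the
// conversion above.
#[allow(dead_code)]
fn single_triangle_example() -> Vec<Tri> {
    let vertices = vec![0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0];
    let indices = vec![0u32, 1, 2];
    make_triangles_from_vertices_indices(&vertices, &indices)
}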
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
} | fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
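// Added sketch (hedged; not in the original source): projecting a single
// camera-space point with the same `Perspective3` setup `Camera::project` uses.
// The znear of 0.1 is an example value, not the one used in `art` below; the
// point is assumed to lie in front of the camera along -z, matching the `-dist`
// translation applied in `art`.
#[allow(dead_code)]
fn project_single_point_example() -> Point3<f32> {
    let cam = Camera::new(210.0 / 297.0, 2.2, 0.1, 8.0);
    let proj = Perspective3::new(cam.aspect, cam.fovy, cam.znear, cam.zfar);
    proj.project_point(&Point3::new(0.0, 0.0, -1.0))
}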
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough that we can ignore cases where they only partially overlap
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
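// Added check (hedged; not in the original source): the function above is the
// classic even-odd ray-casting test; on a unit square the centroid is reported
// inside and a far-away point outside.
#[allow(dead_code)]
fn point_in_polygon_example() -> (bool, bool) {
    let square = vec![(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)];
    (
        is_inside_a_polygon((0.5, 0.5), &square),
        is_inside_a_polygon((2.0, 2.0), &square),
    )
}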
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
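// Added usage sketch (hedged; not in the original source): `Passage` is a coarse
// occupancy grid; `count` increments and returns how many times a point's cell
// has been hit, which `art` uses above to cap how many triangles may stack on the
// same spot.
#[allow(dead_code)]
fn passage_example() -> usize {
    let mut passage = Passage::new(0.5, 210.0, 297.0);
    passage.count((10.0, 10.0)); // first hit in this cell -> 1
    passage.count((10.1, 10.2)); // rounds to the same 0.5-unit cell -> 2
    passage.count((10.0, 10.0)) // -> 3
}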
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the smaller the angle step (more iterations per revolution)
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx {
break;
}
}
route.push((x, y));
route
} | }
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
| random_line_split |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use sign distance function paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough that we can ignore cases where they only partially overlap
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the smaller the angle step (more iterations per revolution)
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx |
}
route.push((x, y));
route
}
| {
break;
} | conditional_block |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use the signed distance function (SDF) paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough that we can ignore cases where they only partially overlap
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool |
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the smaller the angle step (more iterations per revolution)
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx {
break;
}
}
route.push((x, y));
route
}
| {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
} | identifier_body |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use the signed distance function (SDF) paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
    // quick hack: triangles are small enough to ignore cases where they partially overlap
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
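// Worked example (assumed values, not from the original): for the unit square
// [(0,0),(1,0),(1,1),(0,1)], the even-odd ray cast above flips `inside` exactly
// once for (0.5, 0.5) (when crossing the right-hand edge between (1,0) and (1,1)),
// so that point is reported inside, while (2.0, 0.5) never satisfies the x test
// and stays outside.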
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn | (precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
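// Usage illustration (assumed numbers): Passage::new(0.5, 10.0, 10.0) builds a
// 20x20 counter grid over a 10x10 area; calling count((1.0, 1.0)) twice maps both
// calls to the same cell and returns 1 then 2, which is how `art` above caps how
// many projected triangles may stack on the same spot.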
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
    let da = 1.0 / (r + 8.0); // the bigger the radius, the more angle iterations we need
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx {
break;
}
}
route.push((x, y));
route
}
| new | identifier_name |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else |
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true means the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
}
| {
trace!("not converged {} {} {}", i, total + num, num * num);
} | conditional_block |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true means the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number"); | 10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
} |
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts( | random_line_split |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn | () {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true means the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
}
| gossip_ring | identifier_name |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) |
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
    //true means the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
}
| {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
} | identifier_body |
decrypt.py | import tkinter as tk
texte1 = "kd oqnbgzhm ehbghdq ztqz tm bncd ozq rtarshstshnm zkogzadshptd: bgzptd kdssqd drs qdlokzbdd ozq tmd ztsqd. tshkhrdq kz eqdptdmbd cdr kdssqdr ontq cdbncdq kd ldrrzfd."
texte2 = "gx qosvlnkd wkvlkxo xiu vscx qno yd fsu cx qniix cx unkggx kdvsddyx xu vsdukxdu g'kdckvx. gxi gxuuoxi cy fsu cx qniix qxofxuuxdu cx cxvngxo gxi gxuuoxi cy fxiinmx sokmkdng fscygs 26. ixygxi gxi gxuuoxi cx n n a isdu vlkwwoxxi."
texte3 = "dceuq e n'ehfp cg p'kyhhep uqfw cgiy citudm c gzudiq ni ezhd px c jhptv ep cggsht. kg hdtymdt xdzei gdx rzyq wir mvzxpw, cifcchdb znwd ccyw wy lkcsht, dp isgd uqfw wy ?"
def decalage(lettre_message, lettre_cle):
"""Alors ça c'est la correction mais ça marche pas bien -_-"""
return chr((ord(lettre_message) + ord(lettre_cle))%256)
def dec_texte(texte, cle):
texte_code = ""
t, c = 0, 0
while len(texte_code) < len(texte):
if texte[t] == " " or texte[t] == ":" or texte[t] == "," or texte[t] == "?" or texte[t] == "." or texte[t] == "2" or texte[t] == "6":
texte_code += texte[t]
else:
texte_code += decalage(texte[t], cle[c%len(cle)])
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_code
def chiffre():
    resultat.delete(0, tk.END)
    if entree_texte.get() == "" or entree_cle.get() == "":
        label_res.config(text="Il manque quelque chose en entrée :/")
    else:
        resultat.insert(0, dec_texte(entree_texte.get(), entree_cle.get()))
def chiffre_deux(texte, clef):
resultat.delete(0, tk.END)
resultat.insert(0, dec_texte(texte, clef))
return dec_texte(texte, clef)
def dechiffrement(texte_a_decoder, cle):
texte_decode = ""
t, c = 0, 0
while len(texte_decode) < len(texte_a_decoder):
if texte_a_decoder[t] == " " or texte_a_decoder[t] == ":" or texte_a_decoder[t] == "," or texte_a_decoder[t] == "?" or texte_a_decoder[t] == "." or texte_a_decoder[t] == "2" or texte_a_decoder[t] == "6":
texte_decode += texte_a_decoder[t]
else:
texte_decode += decalage(texte_a_decoder[t], chr(256-ord(cle[c%len(cle)])))
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_decode
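# Round-trip illustration (assumed example, not part of the original exercise):
# dec_texte shifts each kept character by +ord(key char) modulo 256 and
# dechiffrement shifts it back by 256 - ord(key char), so for instance
# dechiffrement(dec_texte("bonjour", "cle"), "cle") == "bonjour".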
def dechiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
label_res.config(text = "Il manque quelque chose en entrée :/")
else:
resultat.insert(0, dechiffrement(entree_texte.get(), entree_cle.get()))
def chiffre_xor(lettre_message, lettre_cle):
return chr(ord(lettre_message) ^ ord(lettre_cle))
def creer_liste_clef(taille):
possibilite_clef = [chr(i) for i in range(256)]
for i in range(taille):
        # We create a list of all possible combinations
        a = [j for j in possibilite_clef] # We add our alphabet to a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
# Tous les caractères possibles / vus dans les textes à décoder
liste_car = []
# Liste vide qui contiendra les combinaisons de caractères possibles
texte_test = ""
# Texte codé à comparé avec le texte initial
l = 0 # Index de liste_car
m = 0 # Index de la clef
t = 1 # Taille clef
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in alphabet] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in alphabet for x in a]
# On ajoute chaque caractère à chaque caractère
# (pas sûr de cette phrase -_-)
liste_car = liste_car + a # On ajoute ce qu'on a trouvé à notre liste
while texte_test != texte_a_trouver:
# Tant qu'on code pas pareil que ce qu'on cherche
texte_test = chiffre_deux(str(liste_car[l]), clef)
# On teste l'encodage avec le texte et la clef actuels
l += 1 # On regarde le caractère suivant
if l >= len(liste_car): # Ne pas aller out of range
l = 0
m += 1 # On change la clef
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# The key found is chr 255 -> ÿ (problem related to the initial code?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une liste qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
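# Toy illustration (assumed input): in the string "eeea", 'e' accounts for 75.0%
# and 'a' for 25.0% of the letters, so trouver_frequence_texte returns a list
# sorted by decreasing frequency starting with [75.0, 'e'], which is what the
# substitution step below matches against alphabet_francais.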
def substituer(texte): # Gives a rough idea but not effective, badly coded
    """Replaces the letters according to their frequency, based on the
    average frequency of letters in the French alphabet."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
return texte_str
# print(substituer(texte2))
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtained by trial and error (by testing the substituer_lettre function in a loop)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
for lettre in texte:
a = False
i = 0
if lettre == " " or lettre == ":" or lettre == "," or lettre == "?" or lettre == "." or lettre == "2" or lettre == "6" or lettre == "'":
nouveau_texte.append(lettre)
else:
while a == False:
if lettre == alphabet_francais[i][1]:
nouveau_texte.append(alphabet[i])
a = True
else:
i += 1
if i == 26:
i = 0
texte_sub = str_convert(nouveau_texte)
return texte_sub
texte2_decode = "le prochain fichier est code par un mot de passe de taille inconnu et contient l'indice. les lettres du mot de passe permettent de décaler les lettres du message original modulo 26. seules les lettres de a a z sont chiffrees."
# print(decode_substitution(texte2, alphabet_decode))
def position_lettre(lettre):
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(alphabet_liste)):
if lettre == alphabet_liste[i]:
return i
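# Example: position_lettre('a') == 0 and position_lettre('c') == 2, i.e. the
# 0-based rank of the letter in the alphabet, used below for Vigenere-style shifts.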
def decaler_les_lettres(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
a = 0
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_sans_espace(texte, clef):
| liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
pass
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_en_bourrin(texte, clef):
    # This one works
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
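# Editor's note (hedged sketch): in the shift functions above, the first branch appears
# unreachable (letter positions are never negative) and the subtraction only works because
# Python's negative indexing wraps around. The helper below expresses the same decoding
# with an explicit modulo 26; its name and exact behaviour are assumptions, not the
# author's original code.
def decaler_modulo(texte, clef):
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    resultat, a = [], 0
    for car in texte:
        if car in alphabet:
            resultat.append(alphabet[(position_lettre(car) - position_lettre(clef[a % len(clef)])) % 26])
            a += 1
        else:
            resultat.append(car)
    return "".join(resultat)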
def creer_clef_lettre(taille):
alphabet = "abcdefghijklmnopqrstuvwxyz"
for i in range(taille):
clef = [j for j in alphabet]
for y in range(i):
clef = [x + j for j in alphabet for x in clef]
return clef
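# Editor's note (hedged sketch): the nested comprehensions above enumerate every key of
# exactly `taille` letters. The standard library's itertools.product gives the same
# enumeration more directly; this alternative helper is added only for illustration.
def creer_clef_lettre_product(taille):
    import itertools
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    return ["".join(p) for p in itertools.product(alphabet, repeat=taille)]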
liste_des_clef = creer_clef_lettre(4)
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres_sans_espace(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
for j in range(len(liste_des_clef)):
coucou = decaler_les_lettres_en_bourrin(texte3,liste_des_clef[j])
if "bravo a" in coucou:
print(coucou, liste_des_clef[j])
# For "bravo a": I had initially tried "grace a", but that did not work, so I tried "bravo a" and it worked
texte3_decode = "bravo a l'aide de l'indice vous avez reussi a casser ce code et a finir ce devoir. le dernier texte est pour les braves, regardez vous dans un miroir, en etes vous un ?"
# The text is looked at in a mirror -> each sentence (or line of verse?) is reversed
def retourner_texte(texte):
texte_a_lenvers = list(texte)
for i in range(len(texte)):
texte_a_lenvers[i] = texte[-i-1]
return str_convert(texte_a_lenvers)
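# Editor's note: string reversal is also available directly through slicing; the
# one-liner below is an equivalent sketch of retourner_texte, added for illustration.
def retourner_texte_slice(texte):
    return texte[::-1]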
texte4 = ["jeqeqecvnf suozvb jfk muj",
"dfjr fmy rvuqsk ve",
"itajtd mifwz nnrt",
"imtrvp zuh srzmzbqz tepr zn",
"tmsnirt imtrvp nec hw",
"dzpqj tjf pdecpr zl jr",
"ptejnt ekpb iu b",
"iiuyu iy ijz surg rjs ttsn",
"votp ac hw rzpuen jozw",
"rvwdvx jbo nirscyjv fi",
"svmkyw ve iaflss yie te",
"teffvv'u riznxjzvv jfk",
"nelrhtjrk dh sivdvjvve",
"yi cvb à jffrds tdp",
"rvwdv sebr onvnqsy zvp",
"zuhjwiM le wmifo wiezib nec",
"triot qmjvr'c onrwz",
"memfqg srq wdaietsq vk"]
texte4_decode = []
texte4_dune_traite = "jeqeqecvnf suozvb jfk muj dfjr fmy rvuqsk ve itajtd mifwz nnrt imtrvp zuh srzmzbqz tepr zn tmsnirt imtrvp nec hw dzpqj tjf pdecpr zl jr ptejnt ekpb iu b iiuyu iy ijz surg rjs ttsn votp ac hw rzpuen jozw rvwdvx jbo nirscyjv fi svmkyw ve iaflss yie te teffvv'u riznxjzvv jfk nelrhtjrk dh sivdvjvve yi cvb à jffrds tdp rvwdv sebr onvnqsy zvp zuhjwiM le wmifo wiezib nec triot qmjvr'c onrwz memfqg srq wdaietsq vk"
#for i in range(len(texte4)):
# texte4_decode.append(decaler_les_lettres_en_bourrin(retourner_texte(texte4[i]), "bravez"))
# texte4_decode.append("\n")
texte4_decode = decaler_les_lettres_en_bourrin(retourner_texte(texte4_dune_traite), "bravez")
# I tried "brave" and its derivatives as the key (braves, braver, bravons...)
# At first I got something like: je voudrais pas crever avant d'avoir connu les chiens noirs du Mexique qui dorment sans rever les singes à cul nu devoreurs de tropiques les araignees d'argent au nid truffe de bulles
# Then I gave up
print(str_convert(texte4_decode))
racine.mainloop()
# decrypt.py
import tkinter as tk
texte1 = "kd oqnbgzhm ehbghdq ztqz tm bncd ozq rtarshstshnm zkogzadshptd: bgzptd kdssqd drs qdlokzbdd ozq tmd ztsqd. tshkhrdq kz eqdptdmbd cdr kdssqdr ontq cdbncdq kd ldrrzfd."
texte2 = "gx qosvlnkd wkvlkxo xiu vscx qno yd fsu cx qniix cx unkggx kdvsddyx xu vsdukxdu g'kdckvx. gxi gxuuoxi cy fsu cx qniix qxofxuuxdu cx cxvngxo gxi gxuuoxi cy fxiinmx sokmkdng fscygs 26. ixygxi gxi gxuuoxi cx n n a isdu vlkwwoxxi."
texte3 = "dceuq e n'ehfp cg p'kyhhep uqfw cgiy citudm c gzudiq ni ezhd px c jhptv ep cggsht. kg hdtymdt xdzei gdx rzyq wir mvzxpw, cifcchdb znwd ccyw wy lkcsht, dp isgd uqfw wy ?"
def decalage(lettre_message, lettre_cle):
"""Alors ça c'est la correction mais ça marche pas bien -_-"""
return chr((ord(lettre_message) + ord(lettre_cle))%256)
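# Editor's note (hedged sketch): decoding undoes the shift above by subtracting the key
# byte modulo 256. This inverse helper is inferred from the encoder and is not part of
# the original file.
def decalage_inverse(lettre_codee, lettre_cle):
    return chr((ord(lettre_codee) - ord(lettre_cle)) % 256)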
def dec_texte(texte, cle):
texte_code = ""
t, c = 0, 0
while len(texte_code) < len(texte):
if texte[t] == " " or texte[t] == ":" or texte[t] == "," or texte[t] == "?" or texte[t] == "." or texte[t] == "2" or texte[t] == "6":
texte_code += texte[t]
else:
texte_code += decalage(texte[t], cle[c%len(cle)])
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_code
def chiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
        label_res.config(text="Il manque quelque chose en entrée :/")
    else:
        resultat.insert(0, dec_texte(entree_texte.get(), entree_cle.get()))
def chiffre_deux(texte, clef):
resultat.delete(0, tk.END)
resultat.insert(0, dec_texte(texte, clef))
return dec_texte(texte, clef)
def dechiffrement(texte_a_decoder, cle):
texte_decode = ""
t, c = 0, 0
while len(texte_decode) < len(texte_a_decoder):
if texte_a_decoder[t] == " " or texte_a_decoder[t] == ":" or texte_a_decoder[t] == "," or texte_a_decoder[t] == "?" or texte_a_decoder[t] == "." or texte_a_decoder[t] == "2" or texte_a_decoder[t] == "6":
texte_decode += texte_a_decoder[t]
else:
texte_decode += decalage(texte_a_decoder[t], chr(256-ord(cle[c%len(cle)])))
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_decode
def dechiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
label_res.config(text = "Il manque quelque chose en entrée :/")
else:
resultat.insert(0, dechiffrement(entree_texte.get(), entree_cle.get()))
def chiffre_xor(lettre_message, lettre_cle):
return chr(ord(lettre_message) ^ ord(lettre_cle))
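# Editor's note: XOR with the same key byte is its own inverse, so chiffre_xor can both
# encrypt and decrypt. The check below is a minimal illustrative sketch, not original code.
def _demo_chiffre_xor():
    assert chiffre_xor(chiffre_xor("a", "k"), "k") == "a"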
def creer_liste_clef(taille):
possibilite_clef = [chr(i) for i in range(256)]
for i in range(taille):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in possibilite_clef] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
# Tous les caractères possibles / vus dans les textes à décoder
liste_car = []
# Liste vide qui contiendra les combinaisons de caractères possibles
texte_test = ""
# Texte codé à comparé avec le texte initial
l = 0 # Index de liste_car
m = 0 # Index de la clef
t = 1 # Taille clef
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in alphabet] # On ajoute notre alphabet à a
for y in range(i):
            a = [x + j for j in alphabet for x in a]
            # On ajoute chaque caractère à chaque caractère
            # (pas sûr de cette phrase -_-)
        liste_car = liste_car + a # On ajoute ce qu'on a trouvé à notre liste
while texte_test != texte_a_trouver:
# Tant qu'on code pas pareil que ce qu'on cherche
texte_test = chiffre_deux(str(liste_car[l]), clef)
# On teste l'encodage avec le texte et la clef actuels
l += 1 # On regarde le caractère suivant
if l >= len(liste_car): # Ne pas aller out of range
l = 0
m += 1 # On change la clef
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
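# Editor's note (hedged sketch): for a plain Caesar shift restricted to a-z, a brute force
# only needs the 26 possible shifts rather than every key string tried above. The helper
# below is an assumed simplification added for illustration, not the author's method.
def brute_force_cesar_simple(texte_code):
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    candidats = []
    for d in range(26):
        clair = "".join(
            alphabet[(alphabet.index(c) - d) % 26] if c in alphabet else c
            for c in texte_code
        )
        candidats.append((d, clair))
    return candidats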
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# La clé trouvée est chr 255 -> ÿ (pb lié au code initial ?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une liste qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
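# Editor's note (hedged sketch): collections.Counter computes the same per-letter counts
# in a single pass; this alternative mirrors the output shape of trouver_frequence_texte
# and is added only for illustration (the division guard is an assumption).
def trouver_frequence_texte_counter(texte):
    from collections import Counter
    compte = Counter(c for c in texte if "a" <= c <= "z")
    total = max(len(texte), 1)
    freqs = [[round(compte.get(chr(i + 97), 0) * 100 / total, 3), chr(i + 97)] for i in range(26)]
    freqs.sort(reverse=True)
    return freqs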
def substituer(texte): # Donne une vague idée mais pas efficace, mal codé
"""Remplace les lettres selon leur fréquence, en se basant sur
la fréquence moyenne d'apparition des lettres dans
l'alphabet français."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
return texte_str
# print(substituer(texte2))
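# Editor's note (hedged sketch): the same frequency-based substitution can be written by
# pairing the two rankings in a dictionary. This helper reuses trouver_frequence_texte and
# alphabet_francais defined above; its name and exact mapping rule are assumptions added
# for illustration, not the author's original approach.
def substituer_par_rang(texte):
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    lettres = [c for c in texte if c in alphabet]
    freq_texte = trouver_frequence_texte(lettres)             # most frequent first
    freq_reference = sorted(alphabet_francais, reverse=True)  # most frequent first
    correspondance = {freq_texte[i][1]: freq_reference[i][1] for i in range(26)}
    return "".join(correspondance.get(c, c) for c in texte)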
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtenu par essai et erreur (en testant la fonction substituer_lettre en boucle)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
for lettre in texte:
a = False
i = 0
if lettre == " " or lettre == ":" or lettre == "," or lettre == "?" or lettre == "." or lettre == "2" or lettre == "6" or lettre == "'":
nouveau_texte.append(lettre)
else:
while a == False:
if lettre == alphabet_francais[i][1]:
nouveau_texte.append(alphabet[i])
a = True
else:
i += 1
if i == 26:
i = 0
texte_sub = str_convert(nouveau_texte)
return texte_sub
texte2_decode = "le prochain fichier est code par un mot de passe de taille inconnu et contient l'indice. les lettres du mot de passe permettent de décaler les lettres du message original modulo 26. seules les lettres de a a z sont chiffrees."
# print(decode_substitution(texte2, alphabet_decode))
def position_lettre(lettre):
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(alphabet_liste)):
if lettre == alphabet_liste[i]:
return i
def decaler_les_lettres(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
a = 0
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_sans_espace(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
pass
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_en_bourrin(texte, clef):
# Celui-là marche
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def creer_clef_lettre(taille):
alphabet = "abcdefghijklmnopqrstuvwxyz"
for i in range(taille):
clef = [j for j in alphabet]
for y in range(i):
clef = [x + j for j in alphabet for x in clef]
return clef
liste_des_clef = creer_clef_lettre(4)
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres_sans_espace(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
for j in range(len(liste_des_clef)):
coucou = decaler_les_lettres_en_bourrin(texte3,liste_des_clef[j])
if "bravo a" in coucou:
print(coucou, liste_des_clef[j])
# Pour "bravo a" j'avais essayé "grace a" au depart mais cela n'avait pas fonctionne, donc j'ai essaye "bravo a" et ca a marche
texte3_decode = "bravo a l'aide de l'indice vous avez reussi a casser ce code et a finir ce devoir. le dernier texte est pour les braves, regardez vous dans un miroir, en etes vous un ?"
# On regarde le texte dans un miroir -> on retourne chaque phrase (ou vers ?)
def retourner_texte(texte):
texte_a_lenvers = list(texte)
for i in range(len(texte)):
texte_a_lenvers[i] = texte[-i-1]
return str_convert(texte_a_lenvers)
texte4 = ["jeqeqecvnf suozvb jfk muj",
"dfjr fmy rvuqsk ve",
"itajtd mifwz nnrt",
"imtrvp zuh srzmzbqz tepr zn",
"tmsnirt imtrvp nec hw",
"dzpqj tjf pdecpr zl jr",
"ptejnt ekpb iu b",
"iiuyu iy ijz surg rjs ttsn",
"votp ac hw rzpuen jozw",
"rvwdvx jbo nirscyjv fi",
"svmkyw ve iaflss yie te",
"teffvv'u riznxjzvv jfk",
"nelrhtjrk dh sivdvjvve",
"yi cvb à jffrds tdp",
"rvwdv sebr onvnqsy zvp",
"zuhjwiM le wmifo wiezib nec",
"triot qmjvr'c onrwz",
"memfqg srq wdaietsq vk"]
texte4_decode = []
texte4_dune_traite = "jeqeqecvnf suozvb jfk muj dfjr fmy rvuqsk ve itajtd mifwz nnrt imtrvp zuh srzmzbqz tepr zn tmsnirt imtrvp nec hw dzpqj tjf pdecpr zl jr ptejnt ekpb iu b iiuyu iy ijz surg rjs ttsn votp ac hw rzpuen jozw rvwdvx jbo nirscyjv fi svmkyw ve iaflss yie te teffvv'u riznxjzvv jfk nelrhtjrk dh sivdvjvve yi cvb à jffrds tdp rvwdv sebr onvnqsy zvp zuhjwiM le wmifo wiezib nec triot qmjvr'c onrwz memfqg srq wdaietsq vk"
#for i in range(len(texte4)):
# texte4_decode.append(decaler_les_lettres_en_bourrin(retourner_texte(texte4[i]), "bravez"))
# texte4_decode.append("\n")
texte4_decode = decaler_les_lettres_en_bourrin(retourner_texte(texte4_dune_traite), "bravez")
# J'ai essayé "brave" et ses dérivés pour la clef (braves, braver, bravons...)
# J'ai d'abord obtenu un truc du genre : je voudrais pas crever avant d'avoir connu les chiens noirs du Mexique qui dorment sans rever les singes à cul nu devoreurs de tropiques les araignees d'argent au nid truffe de bulles
# Puis j'ai abandonné
print(str_convert(texte4_decode))
racine.mainloop()
# decrypt.py
import tkinter as tk
texte1 = "kd oqnbgzhm ehbghdq ztqz tm bncd ozq rtarshstshnm zkogzadshptd: bgzptd kdssqd drs qdlokzbdd ozq tmd ztsqd. tshkhrdq kz eqdptdmbd cdr kdssqdr ontq cdbncdq kd ldrrzfd."
texte2 = "gx qosvlnkd wkvlkxo xiu vscx qno yd fsu cx qniix cx unkggx kdvsddyx xu vsdukxdu g'kdckvx. gxi gxuuoxi cy fsu cx qniix qxofxuuxdu cx cxvngxo gxi gxuuoxi cy fxiinmx sokmkdng fscygs 26. ixygxi gxi gxuuoxi cx n n a isdu vlkwwoxxi."
texte3 = "dceuq e n'ehfp cg p'kyhhep uqfw cgiy citudm c gzudiq ni ezhd px c jhptv ep cggsht. kg hdtymdt xdzei gdx rzyq wir mvzxpw, cifcchdb znwd ccyw wy lkcsht, dp isgd uqfw wy ?"
def decalage(lettre_message, lettre_cle):
"""Alors ça c'est la correction mais ça marche pas bien -_-"""
return chr((ord(lettre_message) + ord(lettre_cle))%256)
def dec_texte(texte, cle):
texte_code = ""
t, c = 0, 0
while len(texte_code) < len(texte):
if texte[t] == " " or texte[t] == ":" or texte[t] == "," or texte[t] == "?" or texte[t] == "." or texte[t] == "2" or texte[t] == "6":
texte_code += texte[t]
else:
texte_code += decalage(texte[t], cle[c%len(cle)])
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_code
def chiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
        label_res.config(text="Il manque quelque chose en entrée :/")
    else:
        resultat.insert(0, dec_texte(entree_texte.get(), entree_cle.get()))
def chiffre_deux(texte, clef):
resultat.delete(0, tk.END)
resultat.insert(0, dec_texte(texte, clef))
return dec_texte(texte, clef)
def dechiffrement(texte_a_decoder, cle):
texte_decode = ""
t, c = 0, 0
while len(texte_decode) < len(texte_a_decoder):
if texte_a_decoder[t] == " " or texte_a_decoder[t] == ":" or texte_a_decoder[t] == "," or texte_a_decoder[t] == "?" or texte_a_decoder[t] == "." or texte_a_decoder[t] == "2" or texte_a_decoder[t] == "6":
texte_decode += texte_a_decoder[t]
else:
texte_decode += decalage(texte_a_decoder[t], chr(256-ord(cle[c%len(cle)])))
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_decode
def dechiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
label_res.config(text = "Il manque quelque chose en entrée :/")
else:
resultat.insert(0, dechiffrement(entree_texte.get(), entree_cle.get()))
def chiffre_xor(lettre_message, lettre_cle):
return chr(ord(lettre_message) ^ ord(lettre_cle))
def creer_liste_clef(taille):
possibilite_clef = [chr(i) for i in range(256)]
for i in range(taille):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in possibilite_clef] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
# Tous les caractères possibles / vus dans les textes à décoder
liste_car = []
# Liste vide qui contiendra les combinaisons de caractères possibles
texte_test = ""
# Texte codé à comparé avec le texte initial
l = 0 # Index de liste_car
m = 0 # Index de la clef
t = 1 # Taille clef
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in alphabet] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in alphabet for x in a]
# On ajoute chaque caractère à chaque caractère
# (pas sûr de cette phrase -_-)
liste_car = liste_car + a # On ajoute ce qu'on a trouvé à notre liste
while texte_test != texte_a_trouver:
# Tant qu'on code pas pareil que ce qu'on cherche
texte_test = chiffre_deux(str(liste_car[l]), clef)
# On teste l'encodage avec le texte et la clef actuels
l += 1 # On regarde le caractère suivant
if l >= len(liste_car): # Ne pas aller out of range
l = 0
m += 1 # On change la clef
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# La clé trouvée est chr 255 -> ÿ (pb lié au code initial ?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
"""Renvoie un texte depuis une liste qui contient un texte découpé"""
texte_str = ""
for a in range(len(liste)):
texte_str += str(liste[a])
return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
def substituer(texte): # Donne une vague idée mais pas efficace, mal codé
"""Remplace les lettres selon leur fréquence, en se basant sur
la fréquence moyenne d'apparition des lettres dans
l'alphabet français."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
    texte_str = str_convert(nouveau_texte)
    return texte_str
# print(substituer(texte2))
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtenu par essai et erreur (en testant la fonction substituer_lettre en boucle)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
for lettre in texte:
a = False
i = 0
if lettre == " " or lettre == ":" or lettre == "," or lettre == "?" or lettre == "." or lettre == "2" or lettre == "6" or lettre == "'":
nouveau_texte.append(lettre)
else:
while a == False:
if lettre == alphabet_francais[i][1]:
nouveau_texte.append(alphabet[i])
a = True
else:
i += 1
if i == 26:
i = 0
texte_sub = str_convert(nouveau_texte)
return texte_sub
texte2_decode = "le prochain fichier est code par un mot de passe de taille inconnu et contient l'indice. les lettres du mot de passe permettent de décaler les lettres du message original modulo 26. seules les lettres de a a z sont chiffrees."
# print(decode_substitution(texte2, alphabet_decode))
def position_lettre(lettre):
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(alphabet_liste)):
if lettre == alphabet_liste[i]:
return i
def decaler_les_lettres(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
a = 0
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_sans_espace(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
pass
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_en_bourrin(texte, clef):
# Celui-là marche
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def creer_clef_lettre(taille):
alphabet = "abcdefghijklmnopqrstuvwxyz"
for i in range(taille):
clef = [j for j in alphabet]
for y in range(i):
clef = [x + j for j in alphabet for x in clef]
return clef
liste_des_clef = creer_clef_lettre(4)
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres_sans_espace(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
for j in range(len(liste_des_clef)):
coucou = decaler_les_lettres_en_bourrin(texte3,liste_des_clef[j])
if "bravo a" in coucou:
print(coucou, liste_des_clef[j])
# Pour "bravo a" j'avais essayé "grace a" au depart mais cela n'avait pas fonctionne, donc j'ai essaye "bravo a" et ca a marche
texte3_decode = "bravo a l'aide de l'indice vous avez reussi a casser ce code et a finir ce devoir. le dernier texte est pour les braves, regardez vous dans un miroir, en etes vous un ?"
# On regarde le texte dans un miroir -> on retourne chaque phrase (ou vers ?)
def retourner_texte(texte):
texte_a_lenvers = list(texte)
for i in range(len(texte)):
texte_a_lenvers[i] = texte[-i-1]
return str_convert(texte_a_lenvers)
texte4 = ["jeqeqecvnf suozvb jfk muj",
"dfjr fmy rvuqsk ve",
"itajtd mifwz nnrt",
"imtrvp zuh srzmzbqz tepr zn",
"tmsnirt imtrvp nec hw",
"dzpqj tjf pdecpr zl jr",
"ptejnt ekpb iu b",
"iiuyu iy ijz surg rjs ttsn",
"votp ac hw rzpuen jozw",
"rvwdvx jbo nirscyjv fi",
"svmkyw ve iaflss yie te",
"teffvv'u riznxjzvv jfk",
"nelrhtjrk dh sivdvjvve",
"yi cvb à jffrds tdp",
"rvwdv sebr onvnqsy zvp",
"zuhjwiM le wmifo wiezib nec",
"triot qmjvr'c onrwz",
"memfqg srq wdaietsq vk"]
texte4_decode = []
texte4_dune_traite = "jeqeqecvnf suozvb jfk muj dfjr fmy rvuqsk ve itajtd mifwz nnrt imtrvp zuh srzmzbqz tepr zn tmsnirt imtrvp nec hw dzpqj tjf pdecpr zl jr ptejnt ekpb iu b iiuyu iy ijz surg rjs ttsn votp ac hw rzpuen jozw rvwdvx jbo nirscyjv fi svmkyw ve iaflss yie te teffvv'u riznxjzvv jfk nelrhtjrk dh sivdvjvve yi cvb à jffrds tdp rvwdv sebr onvnqsy zvp zuhjwiM le wmifo wiezib nec triot qmjvr'c onrwz memfqg srq wdaietsq vk"
#for i in range(len(texte4)):
# texte4_decode.append(decaler_les_lettres_en_bourrin(retourner_texte(texte4[i]), "bravez"))
# texte4_decode.append("\n")
texte4_decode = decaler_les_lettres_en_bourrin(retourner_texte(texte4_dune_traite), "bravez")
# J'ai essayé "brave" et ses dérivés pour la clef (braves, braver, bravons...)
# J'ai d'abord obtenu un truc du genre : je voudrais pas crever avant d'avoir connu les chiens noirs du Mexique qui dorment sans rever les singes à cul nu devoreurs de tropiques les araignees d'argent au nid truffe de bulles
# Puis j'ai abandonné
print(str_convert(texte4_decode))
racine.mainloop()
# decrypt.py
import tkinter as tk
texte1 = "kd oqnbgzhm ehbghdq ztqz tm bncd ozq rtarshstshnm zkogzadshptd: bgzptd kdssqd drs qdlokzbdd ozq tmd ztsqd. tshkhrdq kz eqdptdmbd cdr kdssqdr ontq cdbncdq kd ldrrzfd."
texte2 = "gx qosvlnkd wkvlkxo xiu vscx qno yd fsu cx qniix cx unkggx kdvsddyx xu vsdukxdu g'kdckvx. gxi gxuuoxi cy fsu cx qniix qxofxuuxdu cx cxvngxo gxi gxuuoxi cy fxiinmx sokmkdng fscygs 26. ixygxi gxi gxuuoxi cx n n a isdu vlkwwoxxi."
texte3 = "dceuq e n'ehfp cg p'kyhhep uqfw cgiy citudm c gzudiq ni ezhd px c jhptv ep cggsht. kg hdtymdt xdzei gdx rzyq wir mvzxpw, cifcchdb znwd ccyw wy lkcsht, dp isgd uqfw wy ?"
def decalage(lettre_message, lettre_cle):
"""Alors ça c'est la correction mais ça marche pas bien -_-"""
return chr((ord(lettre_message) + ord(lettre_cle))%256)
def dec_texte(texte, cle):
texte_code = ""
t, c = 0, 0
while len(texte_code) < len(texte):
if texte[t] == " " or texte[t] == ":" or texte[t] == "," or texte[t] == "?" or texte[t] == "." or texte[t] == "2" or texte[t] == "6":
texte_code += texte[t]
else:
texte_code += decalage(texte[t], cle[c%len(cle)])
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_code
def chiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
        label_res.config(text="Il manque quelque chose en entrée :/")
    else:
        resultat.insert(0, dec_texte(entree_texte.get(), entree_cle.get()))
def chiffre_deux(texte, clef):
resultat.delete(0, tk.END)
resultat.insert(0, dec_texte(texte, clef))
return dec_texte(texte, clef)
def dechiffrement(texte_a_decoder, cle):
texte_decode = ""
t, c = 0, 0
while len(texte_decode) < len(texte_a_decoder):
if texte_a_decoder[t] == " " or texte_a_decoder[t] == ":" or texte_a_decoder[t] == "," or texte_a_decoder[t] == "?" or texte_a_decoder[t] == "." or texte_a_decoder[t] == "2" or texte_a_decoder[t] == "6":
texte_decode += texte_a_decoder[t]
else:
texte_decode += decalage(texte_a_decoder[t], chr(256-ord(cle[c%len(cle)])))
t, c = t + 1, c + 1
if c == len(cle):
c = 0
return texte_decode
def dechiffre():
resultat.delete(0, tk.END)
if entree_texte.get() == "" or entree_cle.get() == "":
label_res.config(text = "Il manque quelque chose en entrée :/")
else:
resultat.insert(0, dechiffrement(entree_texte.get(), entree_cle.get()))
def chiffre_xor(lettre_message, lettre_cle):
return chr(ord(lettre_message) ^ ord(lettre_cle))
def creer_liste_clef(taille):
possibilite_clef = [chr(i) for i in range(256)]
for i in range(taille):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in possibilite_clef] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in possibilite_clef for x in a]
return a
def brute_force_cesar(texte_a_trouver):
"""Trouve une clé longue de 1 et une suite de caractères qui
correspondent au texte à trouver. Pas sûr de l'idée."""
alphabet = "abcdefghijklmnopqrstuvwxyz :,?.0123456789'"
# Tous les caractères possibles / vus dans les textes à décoder
liste_car = []
# Liste vide qui contiendra les combinaisons de caractères possibles
texte_test = ""
# Texte codé à comparé avec le texte initial
l = 0 # Index de liste_car
m = 0 # Index de la clef
t = 1 # Taille clef
clef = creer_liste_clef(t)
for i in range(len(texte_a_trouver)):
# On crée une liste de toutes les combinaisons possibles
a = [j for j in alphabet] # On ajoute notre alphabet à a
for y in range(i):
a = [x + j for j in alphabet for x in a]
# On ajoute chaque caractère à chaque caractère
# (pas sûr de cette phrase -_-)
liste_car = liste_car + a # On ajoute ce qu'on a trouvé à notre liste
while texte_test != texte_a_trouver:
# Tant qu'on code pas pareil que ce qu'on cherche
texte_test = chiffre_deux(str(liste_car[l]), clef)
# On teste l'encodage avec le texte et la clef actuels
l += 1 # On regarde le caractère suivant
if l >= len(liste_car): # Ne pas aller out of range
l = 0
m += 1 # On change la clef
if m == 256:
t += 1
clef = creer_liste_clef(t)
m += -1
entree_cle.insert(0, clef[m])
return ord(clef[m])
racine=tk.Tk()
racine.title("Cryptographie")
entree_texte = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_texte.grid(row = 0, column = 0)
entree_cle = tk.Entry(racine, width = 50, font = ("helvetica", "20"))
entree_cle.grid(row = 1, column = 0)
label_texte = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer le message ici.")
label_texte.grid(row = 0, column = 1)
label_cle = tk.Label(racine,font = ("helvetica", "20"), text = "Entrer la clé ici.")
label_cle.grid(row = 1, column = 1)
bouton_coder=tk.Button(racine, text="Chiffrer texte",fg="black", width=15, command=chiffre)
bouton_coder.grid(row=2, column=0)
bouton_decoder=tk.Button(racine,text="Déchiffrer texte",fg="black", width=15,command=dechiffre)
bouton_decoder.grid(row=2, column=1)
resultat=tk.Entry(racine,width = 50, font = ("helvetica", "20"))
resultat.grid(row=3,column=0)
label_res=tk.Label(racine,font = ("helvetica", "20"), text="Résultat ici.")
label_res.grid(row = 3, column=1)
# print("La clef est : chr", brute_force_cesar("kd"))
# La clé trouvée est chr 255 -> ÿ (pb lié au code initial ?)
texte1_decode = "le prochain fichier aura un code par substitution alphabetique: chaque lettre est remplacee par une autre. utiliser la frequence des lettres pour decoder le message."
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
def str_convert(liste):
    """Renvoie un texte depuis une liste qui contient un texte découpé"""
    texte_str = ""
    for a in range(len(liste)):
        texte_str += str(liste[a])
    return texte_str
def trouver_frequence_lettre(lettre, texte):
"""Trouve le nombre d'itérations d'une lettre dans un texte"""
# Oui le nom porte à confusion
compteur = 0
for i in texte:
if i == lettre:
compteur += 1
return compteur
def trouver_frequence_texte(texte):
"""Applique la fonction précédante pour toutes les lettres"""
# On obtient vraiment une fréquence cette fois
alphabet_francais_texte = [0 for i in range(26)]
for i in range(26):
alphabet_francais_texte[i] = [alphabet_francais_texte[i], chr(i + 97)]
for i in range(26):
alphabet_francais_texte[i][0] = round((trouver_frequence_lettre(chr(i + 97), texte) * 100) / len(texte), 3)
alphabet_francais_texte.sort(reverse=True)
return alphabet_francais_texte
def substituer(texte): # Donne une vague idée mais pas efficace, mal codé
"""Remplace les lettres selon leur fréquence, en se basant sur
la fréquence moyenne d'apparition des lettres dans
l'alphabet français."""
alphabet = "abcdefghijklmnopqrstuvwxyz"
texte_lettre_only = []
for car in texte:
if car in alphabet:
texte_lettre_only.append(car)
nouveau_texte = list(texte)
j = 0
alphabet_francais_texte = trouver_frequence_texte(texte_lettre_only)
alphabet_francais.sort(reverse=True)
for lettre in texte_lettre_only:
a = False
i = 0
if nouveau_texte[j] == " " or nouveau_texte[j] == ":" or nouveau_texte[j] == "," or nouveau_texte[j] == "?" or nouveau_texte[j] == "." or nouveau_texte[j] == "2" or nouveau_texte[j] == "6":
j += 1
else:
while a == False:
if lettre == alphabet_francais_texte[i][1]:
nouveau_texte[j] = alphabet_francais[i][1]
a = True
else:
i += 1
if i == 26:
i = 0
j += 1
texte_str = str_convert(nouveau_texte)
return texte_str
# print(substituer(texte2))
def substituer_lettre(texte, lettre_initiale, lettre_finale):
nouveau_texte = list(texte)
i = 0
for lettre in texte:
if lettre == lettre_initiale:
nouveau_texte[i] = lettre_finale
i += 1
nouveau_texte = str_convert(nouveau_texte)
return nouveau_texte
# print(alphabet_francais)
# print(trouver_frequence_texte(texte2))
# print(texte2)
alphabet_decode = ['z', 'b', 'd', 'n', 'e', 'm', 'l', 'h', 's', 'j', 'i', 'h', 'g', 'a', 'r', 'p', 'p', 'r', 'o', 't', 't', 'c', 'f', 'e', 'u', 'y']
# Obtenu par essai et erreur (en testant la fonction substituer_lettre en boucle)
def decode_substitution(texte, alphabet):
"""Effectue une substitution par rapport à un alphabet donné."""
nouveau_texte = []
alphabet_francais = [[7.11, 'a'], [1.14, 'b'], [3.18, 'c'], [3.67, 'd'], [12.10, 'e'], [1.11, 'f'], [1.23, 'g'], [1.11, 'h'], [6.59, 'i'], [0.34, 'j'], [0.29, 'k'], [4.96, 'l'], [2.62, 'm'], [6.39, 'n'], [5.02, 'o'], [2.49, 'p'], [0.65, 'q'], [6.07, 'r'], [6.51, 's'], [5.92, 't'], [4.49, 'u'], [1.11, 'v'], [0.17, 'w'], [0.38, 'x'], [0.46, 'y'], [0.15, 'z']]
for lettre in texte:
a = False
i = 0
if lettre == " " or lettre == ":" or lettre == "," or lettre == "?" or lettre == "." or lettre == "2" or lettre == "6" or lettre == "'":
nouveau_texte.append(lettre)
else:
while a == False:
if lettre == alphabet_francais[i][1]:
nouveau_texte.append(alphabet[i])
a = True
else:
i += 1
if i == 26:
i = 0
texte_sub = str_convert(nouveau_texte)
return texte_sub
texte2_decode = "le prochain fichier est code par un mot de passe de taille inconnu et contient l'indice. les lettres du mot de passe permettent de décaler les lettres du message original modulo 26. seules les lettres de a a z sont chiffrees."
# print(decode_substitution(texte2, alphabet_decode))
def position_lettre(lettre):
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(alphabet_liste)):
if lettre == alphabet_liste[i]:
return i
def decaler_les_lettres(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
a = 0
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_sans_espace(texte, clef):
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
elif liste_texte[i] == " ":
pass
else:
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def decaler_les_lettres_en_bourrin(texte, clef):
# Celui-là marche
liste_texte = list(texte)
liste_clef = list(clef)
a = 0
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_liste = list(alphabet)
for i in range(len(liste_texte)):
if liste_texte[i] in alphabet:
if position_lettre(liste_texte[i])+position_lettre(liste_clef[a]) < 0:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])+position_lettre(liste_clef[a])]
else:
liste_texte[i] = alphabet_liste[position_lettre(liste_texte[i])-position_lettre(liste_clef[a])]
a += 1
if a == len(clef):
a = 0
return str_convert(liste_texte)
def creer_clef_lettre(taille):
alphabet = "abcdefghijklmnopqrstuvwxyz"
for i in range(taille):
clef = [j for j in alphabet]
for y in range(i):
clef = [x + j for j in alphabet for x in clef]
return clef
liste_des_clef = creer_clef_lettre(4)
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
#for j in range(len(liste_des_clef)):
# coucou = decaler_les_lettres_sans_espace(texte3,liste_des_clef[j])
# if "bravo a" in coucou:
# print(coucou, liste_des_clef[j])
for j in range(len(liste_des_clef)):
coucou = decaler_les_lettres_en_bourrin(texte3,liste_des_clef[j])
if "bravo a" in coucou:
print(coucou, liste_des_clef[j])
# Pour "bravo a" j'avais essayé "grace a" au depart mais cela n'avait pas fonctionne, donc j'ai essaye "bravo a" et ca a marche
texte3_decode = "bravo a l'aide de l'indice vous avez reussi a casser ce code et a finir ce devoir. le dernier texte est pour les braves, regardez vous dans un miroir, en etes vous un ?"
# On regarde le texte dans un miroir -> on retourne chaque phrase (ou vers ?)
def retourner_texte(texte):
texte_a_lenvers = list(texte)
for i in range(len(texte)):
texte_a_lenvers[i] = texte[-i-1]
return str_convert(texte_a_lenvers)
texte4 = ["jeqeqecvnf suozvb jfk muj",
"dfjr fmy rvuqsk ve",
"itajtd mifwz nnrt",
"imtrvp zuh srzmzbqz tepr zn",
"tmsnirt imtrvp nec hw",
"dzpqj tjf pdecpr zl jr",
"ptejnt ekpb iu b",
"iiuyu iy ijz surg rjs ttsn",
"votp ac hw rzpuen jozw",
"rvwdvx jbo nirscyjv fi",
"svmkyw ve iaflss yie te",
"teffvv'u riznxjzvv jfk",
"nelrhtjrk dh sivdvjvve",
"yi cvb à jffrds tdp",
"rvwdv sebr onvnqsy zvp",
"zuhjwiM le wmifo wiezib nec",
"triot qmjvr'c onrwz",
"memfqg srq wdaietsq vk"]
texte4_decode = []
texte4_dune_traite = "jeqeqecvnf suozvb jfk muj dfjr fmy rvuqsk ve itajtd mifwz nnrt imtrvp zuh srzmzbqz tepr zn tmsnirt imtrvp nec hw dzpqj tjf pdecpr zl jr ptejnt ekpb iu b iiuyu iy ijz surg rjs ttsn votp ac hw rzpuen jozw rvwdvx jbo nirscyjv fi svmkyw ve iaflss yie te teffvv'u riznxjzvv jfk nelrhtjrk dh sivdvjvve yi cvb à jffrds tdp rvwdv sebr onvnqsy zvp zuhjwiM le wmifo wiezib nec triot qmjvr'c onrwz memfqg srq wdaietsq vk"
#for i in range(len(texte4)):
# texte4_decode.append(decaler_les_lettres_en_bourrin(retourner_texte(texte4[i]), "bravez"))
# texte4_decode.append("\n")
texte4_decode = decaler_les_lettres_en_bourrin(retourner_texte(texte4_dune_traite), "bravez")
# J'ai essayé "brave" et ses dérivés pour la clef (braves, braver, bravons...)
# J'ai d'abord obtenu un truc du genre : je voudrais pas crever avant d'avoir connu les chiens noirs du Mexique qui dorment sans rever les singes à cul nu devoreurs de tropiques les araignees d'argent au nid truffe de bulles
# Puis j'ai abandonné
print(str_convert(texte4_decode))
racine.mainloop()
// dashboard.go
package main
import (
"fmt"
"math"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"unicode"
"github.com/AlecAivazis/survey/v2"
"github.com/pbnjay/memory"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/metabase"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
)
var (
metabaseUser = "[email protected]"
metabasePassword string
metabaseDbPath string
metabaseConfigPath string
metabaseConfigFolder = "metabase/"
metabaseConfigFile = "metabase.yaml"
metabaseImage = "metabase/metabase:v0.46.6.1"
/**/
metabaseListenAddress = "127.0.0.1"
metabaseListenPort = "3000"
metabaseContainerID = "crowdsec-metabase"
crowdsecGroup = "crowdsec"
forceYes bool
	/* information needed to set up a random password on the user's behalf */
)
func NewDashboardCmd() *cobra.Command {
/* ---- UPDATE COMMAND */
var cmdDashboard = &cobra.Command{
Use: "dashboard [command]",
Short: "Manage your metabase dashboard container [requires local API]",
Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics.
Note: This command requires direct database access, so it is intended to be run on the Local API/master.
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard start
cscli dashboard stop
cscli dashboard remove
`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
return err
}
if err := metabase.TestAvailability(); err != nil {
return err
}
metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
return err
}
/*
Old container name was "/crowdsec-metabase" but podman doesn't
allow '/' in container name. We do this check to not break
existing dashboard setup.
*/
if !metabase.IsContainerExist(metabaseContainerID) {
oldContainerID := fmt.Sprintf("/%s", metabaseContainerID)
if metabase.IsContainerExist(oldContainerID) {
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmdDashboard.AddCommand(NewDashboardSetupCmd())
cmdDashboard.AddCommand(NewDashboardStartCmd())
cmdDashboard.AddCommand(NewDashboardStopCmd())
cmdDashboard.AddCommand(NewDashboardShowPasswordCmd())
cmdDashboard.AddCommand(NewDashboardRemoveCmd())
return cmdDashboard
}
func NewDashboardSetupCmd() *cobra.Command {
var force bool
var cmdDashSetup = &cobra.Command{
Use: "setup",
Short: "Setup a metabase container.",
Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
}
if metabasePassword == "" {
isValid := passwordIsValid(metabasePassword)
for !isValid {
metabasePassword = generatePassword(16)
isValid = passwordIsValid(metabasePassword)
}
}
if err := checkSystemMemory(&forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
func NewDashboardStartCmd() *cobra.Command {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
		Long:              `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func NewDashboardRemoveCmd() *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exist, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
if err := metabase.RemoveImageContainer(m.Config.Image); err != nil {
if !strings.Contains(err.Error(), "No such image") {
return fmt.Errorf("removing docker image: %s", err)
}
}
}
return nil
},
}
cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashRemove
}
func passwordIsValid(password string) bool {
hasDigit := false
for _, j := range password {
if unicode.IsDigit(j) {
hasDigit = true
break
}
}
if !hasDigit || len(password) < 6 {
return false
}
return true
}
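// Editor's note (illustrative sketch, not from the original source): passwordIsValid only
// requires at least one digit and a minimum length of 6. A table-driven test along these
// lines could document that behaviour; the test name is an assumption, and it would belong
// in a _test.go file with the "testing" package imported.
//
//	func TestPasswordIsValid(t *testing.T) {
//		cases := map[string]bool{"abc": false, "abcdefg": false, "abcde1": true, "a1": false}
//		for input, want := range cases {
//			if got := passwordIsValid(input); got != want {
//				t.Errorf("passwordIsValid(%q) = %v, want %v", input, got, want)
//			}
//		}
//	}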
func checkSystemMemory(forceYes *bool) error {
totMem := memory.TotalMemory()
if totMem >= uint64(math.Pow(2, 30)) {
return nil
}
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
				Message: "Metabase requires 1-2GB of RAM; your system is below this requirement. Continue?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about RAM check: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
return nil
}
log.Warn("Metabase requires 1-2GB of RAM, your system is below this requirement")
return nil
}
func warnIfNotLoopback(addr string) {
	if addr == "127.0.0.1" || addr == "::1" {
		return
	}
log.Warnf("You are potentially exposing your metabase port to the internet (addr: %s), please consider using a reverse proxy", addr)
}
func disclaimer(forceYes *bool) error {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "CrowdSec takes no responsibility for the security of your metabase instance. Do you accept these responsibilities ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to responsibilities")
}
return nil
}
log.Warn("CrowdSec takes no responsibility for the security of your metabase instance. You used force yes, so you accept this disclaimer")
return nil
}
func checkGroups(forceYes *bool) (*user.Group, error) {
groupExist := false
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil {
groupExist = true
}
if !groupExist {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: fmt.Sprintf("For metabase docker to be able to access SQLite file we need to add a new group called '%s' to the system, is it ok for you ?", crowdsecGroup),
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return dockerGroup, fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return dockerGroup, fmt.Errorf("unable to continue without creating '%s' group", crowdsecGroup)
}
}
groupAddCmd, err := exec.LookPath("groupadd")
if err != nil {
return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue")
}
groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}}
if err := groupAdd.Run(); err != nil {
return dockerGroup, fmt.Errorf("unable to add group '%s': %s", dockerGroup, err)
}
dockerGroup, err = user.LookupGroup(crowdsecGroup)
if err != nil {
return dockerGroup, fmt.Errorf("unable to lookup '%s' group: %+v", dockerGroup, err)
}
}
intID, err := strconv.Atoi(dockerGroup.Gid)
if err != nil {
return dockerGroup, fmt.Errorf("unable to convert group ID to int: %s", err)
}
if err := os.Chown(csConfig.DbConfig.DbPath, 0, intID); err != nil {
return dockerGroup, fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err)
}
return dockerGroup, nil
}
| {
return
} | conditional_block |
dashboard.go | package main
import (
"fmt"
"math"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"unicode"
"github.com/AlecAivazis/survey/v2"
"github.com/pbnjay/memory"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/metabase"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
)
var (
metabaseUser = "[email protected]"
metabasePassword string
metabaseDbPath string
metabaseConfigPath string
metabaseConfigFolder = "metabase/"
metabaseConfigFile = "metabase.yaml"
metabaseImage = "metabase/metabase:v0.46.6.1"
/**/
metabaseListenAddress = "127.0.0.1"
metabaseListenPort = "3000"
metabaseContainerID = "crowdsec-metabase"
crowdsecGroup = "crowdsec"
forceYes bool
/*information needed to set up a random password on the user's behalf*/
)
func NewDashboardCmd() *cobra.Command {
/* ---- UPDATE COMMAND */
var cmdDashboard = &cobra.Command{
Use: "dashboard [command]",
Short: "Manage your metabase dashboard container [requires local API]",
Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics.
Note: This command requires direct database access, so it is intended to be run on the Local API/master.
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard start
cscli dashboard stop
cscli dashboard remove
`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
return err
}
if err := metabase.TestAvailability(); err != nil {
return err
}
metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
return err
}
/*
Old container name was "/crowdsec-metabase" but podman doesn't
allow '/' in container name. We do this check to not break
existing dashboard setup.
*/
if !metabase.IsContainerExist(metabaseContainerID) {
oldContainerID := fmt.Sprintf("/%s", metabaseContainerID)
if metabase.IsContainerExist(oldContainerID) {
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmdDashboard.AddCommand(NewDashboardSetupCmd())
cmdDashboard.AddCommand(NewDashboardStartCmd())
cmdDashboard.AddCommand(NewDashboardStopCmd())
cmdDashboard.AddCommand(NewDashboardShowPasswordCmd())
cmdDashboard.AddCommand(NewDashboardRemoveCmd())
return cmdDashboard
}
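// Editor's note (illustrative sketch, not part of the original file): NewDashboardCmd only
// builds the "dashboard" command tree; attaching it to the CLI happens elsewhere. Assuming a
// root *cobra.Command named rootCmd, the hookup would look roughly like this:
func exampleWireDashboardCmd(rootCmd *cobra.Command) {
rootCmd.AddCommand(NewDashboardCmd()) // enables "cscli dashboard setup|start|stop|remove"
}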
func NewDashboardSetupCmd() *cobra.Command {
var force bool
var cmdDashSetup = &cobra.Command{
Use: "setup",
Short: "Setup a metabase container.",
Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
}
if metabasePassword == "" {
isValid := passwordIsValid(metabasePassword)
for !isValid {
metabasePassword = generatePassword(16)
isValid = passwordIsValid(metabasePassword)
}
}
if err := checkSystemMemory(&forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
func NewDashboardStartCmd() *cobra.Command {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func | () *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exist, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
if err := metabase.RemoveImageContainer(m.Config.Image); err != nil {
if !strings.Contains(err.Error(), "No such image") {
return fmt.Errorf("removing docker image: %s", err)
}
}
}
return nil
},
}
cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashRemove
}
func passwordIsValid(password string) bool {
hasDigit := false
for _, j := range password {
if unicode.IsDigit(j) {
hasDigit = true
break
}
}
if !hasDigit || len(password) < 6 {
return false
}
return true
}
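// Editor's note (illustrative sketch, not part of the original file): passwordIsValid only
// enforces two rules - at least one digit and at least 6 runes. For example:
func examplePasswordIsValid() {
for _, candidate := range []string{"abcdef", "abc1", "abcde1"} {
// "abcdef" -> false (no digit), "abc1" -> false (too short), "abcde1" -> true
fmt.Printf("%q valid: %v\n", candidate, passwordIsValid(candidate))
}
}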
func checkSystemMemory(forceYes *bool) error {
totMem := memory.TotalMemory()
if totMem >= uint64(math.Pow(2, 30)) {
return nil
}
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Metabase requires 1-2GB of RAM, your system is below this requirement continue ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about RAM check: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
return nil
}
log.Warn("Metabase requires 1-2GB of RAM, your system is below this requirement")
return nil
}
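// Editor's note (illustrative, not part of the original file): the threshold above,
// uint64(math.Pow(2, 30)), is exactly 1 GiB. An integer-only equivalent would be:
const oneGiBExample uint64 = 1 << 30 // 1073741824 bytes, the same value the RAM check compares against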
func warnIfNotLoopback(addr string) {
if addr == "127.0.0.1" || addr == "::1" {
return
}
log.Warnf("You are potentially exposing your metabase port to the internet (addr: %s), please consider using a reverse proxy", addr)
}
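// Editor's note (illustrative sketch, not part of the original file): the check above only
// matches the literal strings "127.0.0.1" and "::1". A broader loopback test could use the
// standard library instead (this would need an extra "net" import, which this file does not
// currently have):
func isLoopbackAddrExample(addr string) bool {
ip := net.ParseIP(addr)
return ip != nil && ip.IsLoopback() // also covers 127.0.0.0/8 addresses other than 127.0.0.1
}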
func disclaimer(forceYes *bool) error {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "CrowdSec takes no responsibility for the security of your metabase instance. Do you accept these responsibilities ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to responsibilities")
}
return nil
}
log.Warn("CrowdSec takes no responsibility for the security of your metabase instance. You used force yes, so you accept this disclaimer")
return nil
}
func checkGroups(forceYes *bool) (*user.Group, error) {
groupExist := false
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil {
groupExist = true
}
if !groupExist {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: fmt.Sprintf("For metabase docker to be able to access SQLite file we need to add a new group called '%s' to the system, is it ok for you ?", crowdsecGroup),
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return dockerGroup, fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return dockerGroup, fmt.Errorf("unable to continue without creating '%s' group", crowdsecGroup)
}
}
groupAddCmd, err := exec.LookPath("groupadd")
if err != nil {
return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue")
}
groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}}
if err := groupAdd.Run(); err != nil {
return dockerGroup, fmt.Errorf("unable to add group '%s': %s", dockerGroup, err)
}
dockerGroup, err = user.LookupGroup(crowdsecGroup)
if err != nil {
return dockerGroup, fmt.Errorf("unable to lookup '%s' group: %+v", dockerGroup, err)
}
}
intID, err := strconv.Atoi(dockerGroup.Gid)
if err != nil {
return dockerGroup, fmt.Errorf("unable to convert group ID to int: %s", err)
}
if err := os.Chown(csConfig.DbConfig.DbPath, 0, intID); err != nil {
return dockerGroup, fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err)
}
return dockerGroup, nil
}
| NewDashboardRemoveCmd | identifier_name |
dashboard.go | package main
import (
"fmt"
"math"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"unicode"
"github.com/AlecAivazis/survey/v2"
"github.com/pbnjay/memory"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/metabase"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
)
var (
metabaseUser = "[email protected]"
metabasePassword string
metabaseDbPath string
metabaseConfigPath string
metabaseConfigFolder = "metabase/"
metabaseConfigFile = "metabase.yaml"
metabaseImage = "metabase/metabase:v0.46.6.1"
/**/
metabaseListenAddress = "127.0.0.1"
metabaseListenPort = "3000"
metabaseContainerID = "crowdsec-metabase"
crowdsecGroup = "crowdsec"
forceYes bool
/*information needed to set up a random password on the user's behalf*/
)
func NewDashboardCmd() *cobra.Command {
/* ---- UPDATE COMMAND */
var cmdDashboard = &cobra.Command{
Use: "dashboard [command]",
Short: "Manage your metabase dashboard container [requires local API]",
Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics.
Note: This command requires direct database access, so it is intended to be run on the Local API/master.
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard start
cscli dashboard stop
cscli dashboard remove
`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
return err
}
if err := metabase.TestAvailability(); err != nil {
return err
}
metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
return err
}
/*
Old container name was "/crowdsec-metabase" but podman doesn't
allow '/' in container name. We do this check to not break
existing dashboard setup.
*/
if !metabase.IsContainerExist(metabaseContainerID) {
oldContainerID := fmt.Sprintf("/%s", metabaseContainerID)
if metabase.IsContainerExist(oldContainerID) {
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmdDashboard.AddCommand(NewDashboardSetupCmd())
cmdDashboard.AddCommand(NewDashboardStartCmd())
cmdDashboard.AddCommand(NewDashboardStopCmd())
cmdDashboard.AddCommand(NewDashboardShowPasswordCmd())
cmdDashboard.AddCommand(NewDashboardRemoveCmd())
return cmdDashboard
}
func NewDashboardSetupCmd() *cobra.Command {
var force bool
var cmdDashSetup = &cobra.Command{
Use: "setup",
Short: "Setup a metabase container.",
Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
}
if metabasePassword == "" {
isValid := passwordIsValid(metabasePassword)
for !isValid {
metabasePassword = generatePassword(16)
isValid = passwordIsValid(metabasePassword)
}
}
if err := checkSystemMemory(&forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
func NewDashboardStartCmd() *cobra.Command {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
}
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func NewDashboardRemoveCmd() *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exist, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
if err := metabase.RemoveImageContainer(m.Config.Image); err != nil {
if !strings.Contains(err.Error(), "No such image") {
return fmt.Errorf("removing docker image: %s", err)
}
}
}
return nil
},
}
cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashRemove
}
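// Editor's note (illustrative, not part of the original file): the two flags above are separate
// knobs: -f/--force additionally removes the metabase docker image, while -y/--yes only skips
// the confirmation prompt. Assumed example invocations:
//
// cscli dashboard remove --yes          // remove container and db, keep the image, no prompt
// cscli dashboard remove --force --yes  // also remove the docker image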
func passwordIsValid(password string) bool {
hasDigit := false
for _, j := range password {
if unicode.IsDigit(j) {
hasDigit = true
break
}
}
if !hasDigit || len(password) < 6 {
return false
}
return true
}
func checkSystemMemory(forceYes *bool) error {
totMem := memory.TotalMemory()
if totMem >= uint64(math.Pow(2, 30)) {
return nil
}
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Metabase requires 1-2GB of RAM, your system is below this requirement continue ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about RAM check: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
return nil
}
log.Warn("Metabase requires 1-2GB of RAM, your system is below this requirement")
return nil
}
func warnIfNotLoopback(addr string) {
if addr == "127.0.0.1" || addr == "::1" {
return
}
log.Warnf("You are potentially exposing your metabase port to the internet (addr: %s), please consider using a reverse proxy", addr)
}
func disclaimer(forceYes *bool) error {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "CrowdSec takes no responsibility for the security of your metabase instance. Do you accept these responsibilities ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to responsibilities")
}
return nil
}
log.Warn("CrowdSec takes no responsibility for the security of your metabase instance. You used force yes, so you accept this disclaimer")
return nil
}
func checkGroups(forceYes *bool) (*user.Group, error) {
groupExist := false
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil {
groupExist = true
}
if !groupExist {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: fmt.Sprintf("For metabase docker to be able to access SQLite file we need to add a new group called '%s' to the system, is it ok for you ?", crowdsecGroup),
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return dockerGroup, fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return dockerGroup, fmt.Errorf("unable to continue without creating '%s' group", crowdsecGroup)
}
}
groupAddCmd, err := exec.LookPath("groupadd")
if err != nil {
return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue")
} | }
dockerGroup, err = user.LookupGroup(crowdsecGroup)
if err != nil {
return dockerGroup, fmt.Errorf("unable to lookup '%s' group: %+v", dockerGroup, err)
}
}
intID, err := strconv.Atoi(dockerGroup.Gid)
if err != nil {
return dockerGroup, fmt.Errorf("unable to convert group ID to int: %s", err)
}
if err := os.Chown(csConfig.DbConfig.DbPath, 0, intID); err != nil {
return dockerGroup, fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err)
}
return dockerGroup, nil
} |
groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}}
if err := groupAdd.Run(); err != nil {
return dockerGroup, fmt.Errorf("unable to add group '%s': %s", dockerGroup, err) | random_line_split |
dashboard.go | package main
import (
"fmt"
"math"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"unicode"
"github.com/AlecAivazis/survey/v2"
"github.com/pbnjay/memory"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/crowdsecurity/crowdsec/pkg/metabase"
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require"
)
var (
metabaseUser = "[email protected]"
metabasePassword string
metabaseDbPath string
metabaseConfigPath string
metabaseConfigFolder = "metabase/"
metabaseConfigFile = "metabase.yaml"
metabaseImage = "metabase/metabase:v0.46.6.1"
/**/
metabaseListenAddress = "127.0.0.1"
metabaseListenPort = "3000"
metabaseContainerID = "crowdsec-metabase"
crowdsecGroup = "crowdsec"
forceYes bool
/*information needed to set up a random password on the user's behalf*/
)
func NewDashboardCmd() *cobra.Command {
/* ---- UPDATE COMMAND */
var cmdDashboard = &cobra.Command{
Use: "dashboard [command]",
Short: "Manage your metabase dashboard container [requires local API]",
Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics.
Note: This command requires direct database access, so it is intended to be run on the Local API/master.
`,
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard start
cscli dashboard stop
cscli dashboard remove
`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := require.LAPI(csConfig); err != nil {
return err
}
if err := metabase.TestAvailability(); err != nil {
return err
}
metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder)
metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile)
if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil {
return err
}
if err := require.DB(csConfig); err != nil {
return err
}
/*
Old container name was "/crowdsec-metabase" but podman doesn't
allow '/' in container name. We do this check to not break
existing dashboard setup.
*/
if !metabase.IsContainerExist(metabaseContainerID) {
oldContainerID := fmt.Sprintf("/%s", metabaseContainerID)
if metabase.IsContainerExist(oldContainerID) {
metabaseContainerID = oldContainerID
}
}
return nil
},
}
cmdDashboard.AddCommand(NewDashboardSetupCmd())
cmdDashboard.AddCommand(NewDashboardStartCmd())
cmdDashboard.AddCommand(NewDashboardStopCmd())
cmdDashboard.AddCommand(NewDashboardShowPasswordCmd())
cmdDashboard.AddCommand(NewDashboardRemoveCmd())
return cmdDashboard
}
func NewDashboardSetupCmd() *cobra.Command {
var force bool
var cmdDashSetup = &cobra.Command{
Use: "setup",
Short: "Setup a metabase container.",
Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard setup
cscli dashboard setup --listen 0.0.0.0
cscli dashboard setup -l 0.0.0.0 -p 443 --password <password>
`,
RunE: func(cmd *cobra.Command, args []string) error {
if metabaseDbPath == "" {
metabaseDbPath = csConfig.ConfigPaths.DataDir
}
if metabasePassword == "" {
isValid := passwordIsValid(metabasePassword)
for !isValid {
metabasePassword = generatePassword(16)
isValid = passwordIsValid(metabasePassword)
}
}
if err := checkSystemMemory(&forceYes); err != nil {
return err
}
warnIfNotLoopback(metabaseListenAddress)
if err := disclaimer(&forceYes); err != nil {
return err
}
dockerGroup, err := checkGroups(&forceYes)
if err != nil {
return err
}
mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage)
if err != nil {
return err
}
if err := mb.DumpConfig(metabaseConfigPath); err != nil {
return err
}
log.Infof("Metabase is ready")
fmt.Println()
fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL)
fmt.Printf("\tusername : '%s'\n", mb.Config.Username)
fmt.Printf("\tpassword : '%s'\n", mb.Config.Password)
return nil
},
}
cmdDashSetup.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files")
cmdDashSetup.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container")
cmdDashSetup.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container")
cmdDashSetup.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use")
cmdDashSetup.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container")
cmdDashSetup.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
//cmdDashSetup.Flags().StringVarP(&metabaseUser, "user", "u", "[email protected]", "metabase user")
cmdDashSetup.Flags().StringVar(&metabasePassword, "password", "", "metabase password")
return cmdDashSetup
}
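// Editor's note (illustrative sketch, not part of the original file): when --password is empty,
// the RunE above first evaluates passwordIsValid("") (always false) and then keeps calling
// generatePassword(16) until the result validates. A behaviourally equivalent, slightly tighter
// form would be:
//
// if metabasePassword == "" {
// metabasePassword = generatePassword(16)
// for !passwordIsValid(metabasePassword) {
// metabasePassword = generatePassword(16)
// }
// }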
func NewDashboardStartCmd() *cobra.Command |
func NewDashboardStopCmd() *cobra.Command {
var cmdDashStop = &cobra.Command{
Use: "stop",
Short: "Stops the metabase container.",
Long: `Stops the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
if err := metabase.StopContainer(metabaseContainerID); err != nil {
return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err)
}
return nil
},
}
return cmdDashStop
}
func NewDashboardShowPasswordCmd() *cobra.Command {
var cmdDashShowPassword = &cobra.Command{Use: "show-password",
Short: "displays password of metabase.",
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
log.Printf("'%s'", m.Config.Password)
return nil
},
}
return cmdDashShowPassword
}
func NewDashboardRemoveCmd() *cobra.Command {
var force bool
var cmdDashRemove = &cobra.Command{
Use: "remove",
Short: "removes the metabase container.",
Long: `removes the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
Example: `
cscli dashboard remove
cscli dashboard remove --force
`,
RunE: func(cmd *cobra.Command, args []string) error {
if !forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Do you really want to remove crowdsec dashboard? (all your changes will be lost)",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to force: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
}
if metabase.IsContainerExist(metabaseContainerID) {
log.Debugf("Stopping container %s", metabaseContainerID)
if err := metabase.StopContainer(metabaseContainerID); err != nil {
log.Warningf("unable to stop container '%s': %s", metabaseContainerID, err)
}
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil { // if group exist, remove it
groupDelCmd, err := exec.LookPath("groupdel")
if err != nil {
return fmt.Errorf("unable to find 'groupdel' command, can't continue")
}
groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}}
if err := groupDel.Run(); err != nil {
log.Warnf("unable to delete group '%s': %s", dockerGroup, err)
}
}
log.Debugf("Removing container %s", metabaseContainerID)
if err := metabase.RemoveContainer(metabaseContainerID); err != nil {
log.Warnf("unable to remove container '%s': %s", metabaseContainerID, err)
}
log.Infof("container %s stopped & removed", metabaseContainerID)
}
log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir)
if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil {
log.Warnf("failed to remove metabase internal db : %s", err)
}
if force {
m := metabase.Metabase{}
if err := m.LoadConfig(metabaseConfigPath); err != nil {
return err
}
if err := metabase.RemoveImageContainer(m.Config.Image); err != nil {
if !strings.Contains(err.Error(), "No such image") {
return fmt.Errorf("removing docker image: %s", err)
}
}
}
return nil
},
}
cmdDashRemove.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image")
cmdDashRemove.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashRemove
}
func passwordIsValid(password string) bool {
hasDigit := false
for _, j := range password {
if unicode.IsDigit(j) {
hasDigit = true
break
}
}
if !hasDigit || len(password) < 6 {
return false
}
return true
}
func checkSystemMemory(forceYes *bool) error {
totMem := memory.TotalMemory()
if totMem >= uint64(math.Pow(2, 30)) {
return nil
}
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "Metabase requires 1-2GB of RAM, your system is below this requirement continue ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask about RAM check: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to continue")
}
return nil
}
log.Warn("Metabase requires 1-2GB of RAM, your system is below this requirement")
return nil
}
func warnIfNotLoopback(addr string) {
if addr == "127.0.0.1" || addr == "::1" {
return
}
log.Warnf("You are potentially exposing your metabase port to the internet (addr: %s), please consider using a reverse proxy", addr)
}
func disclaimer(forceYes *bool) error {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: "CrowdSec takes no responsibility for the security of your metabase instance. Do you accept these responsibilities ?",
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return fmt.Errorf("user stated no to responsibilities")
}
return nil
}
log.Warn("CrowdSec takes no responsibility for the security of your metabase instance. You used force yes, so you accept this disclaimer")
return nil
}
func checkGroups(forceYes *bool) (*user.Group, error) {
groupExist := false
dockerGroup, err := user.LookupGroup(crowdsecGroup)
if err == nil {
groupExist = true
}
if !groupExist {
if !*forceYes {
var answer bool
prompt := &survey.Confirm{
Message: fmt.Sprintf("For metabase docker to be able to access SQLite file we need to add a new group called '%s' to the system, is it ok for you ?", crowdsecGroup),
Default: true,
}
if err := survey.AskOne(prompt, &answer); err != nil {
return dockerGroup, fmt.Errorf("unable to ask to question: %s", err)
}
if !answer {
return dockerGroup, fmt.Errorf("unable to continue without creating '%s' group", crowdsecGroup)
}
}
groupAddCmd, err := exec.LookPath("groupadd")
if err != nil {
return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue")
}
groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}}
if err := groupAdd.Run(); err != nil {
return dockerGroup, fmt.Errorf("unable to add group '%s': %s", dockerGroup, err)
}
dockerGroup, err = user.LookupGroup(crowdsecGroup)
if err != nil {
return dockerGroup, fmt.Errorf("unable to lookup '%s' group: %+v", dockerGroup, err)
}
}
intID, err := strconv.Atoi(dockerGroup.Gid)
if err != nil {
return dockerGroup, fmt.Errorf("unable to convert group ID to int: %s", err)
}
if err := os.Chown(csConfig.DbConfig.DbPath, 0, intID); err != nil {
return dockerGroup, fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err)
}
return dockerGroup, nil
}
| {
var cmdDashStart = &cobra.Command{
Use: "start",
Short: "Start the metabase container.",
Long: `Starts the metabase container using docker.`,
Args: cobra.ExactArgs(0),
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID)
if err != nil {
return err
}
warnIfNotLoopback(mb.Config.ListenAddr)
if err := disclaimer(&forceYes); err != nil {
return err
}
if err := mb.Container.Start(); err != nil {
return fmt.Errorf("failed to start metabase container : %s", err)
}
log.Infof("Started metabase")
log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort)
return nil
},
}
cmdDashStart.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes")
return cmdDashStart
} | identifier_body |
main.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/pkg/errors"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/urfave/cli/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
profilebindingv1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilebinding/v1alpha1"
profilerecording1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilerecording/v1alpha1"
seccompprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/seccompprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp() | "to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
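// Editor's note (illustrative, not part of the original file): the urfave/cli app above
// dispatches on the first argument, so the same binary runs in different roles. Assumed
// example invocations (binary name taken from config.OperatorName):
//
// security-profiles-operator manager
// security-profiles-operator daemon --with-selinux
// security-profiles-operator webhook --port 9443
// security-profiles-operator log-enricher
// security-profiles-operator version --json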
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
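// Editor's note (illustrative sketch, not part of the original file): rough behaviour of the
// function above for the three shapes of the namespace env var (the comment calls it
// WATCH_NAMESPACE; the code reads it through config.RestrictNamespaceEnvKey). The values used
// here are hypothetical.
func exampleNamespaceOptions() {
for _, v := range []string{"", config.GetOperatorNamespace(), "ns1,ns2"} {
os.Setenv(config.RestrictNamespaceEnvKey, v)
opts := ctrl.Options{}
setControllerOptionsForNamespaces(&opts)
// "" -> cluster-wide cache; operator namespace only -> single-namespace cache;
// anything else -> operator namespace appended and a MultiNamespacedCacheBuilder cache.
fmt.Printf("%q -> Namespace=%q multiNamespaceCache=%v\n", v, opts.Namespace, opts.NewCache != nil)
}
}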
func getEnabledControllers(ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err := secprofnodestatusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add per-node Status API to scheme")
}
if err := setupEnabledControllers(ctx.Context, enabledControllers, mgr, met); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting daemon")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "SPOd error")
}
setupLog.Info("ending daemon")
return nil
}
func runLogEnricher(ctx *cli.Context) error {
const component = "log-enricher"
printInfo(component)
return enricher.New(ctrl.Log.WithName(component)).Run()
}
func runNonRootEnabler(ctx *cli.Context) error {
const component = "non-root-enabler"
printInfo(component)
return nonrootenabler.New().Run(ctrl.Log.WithName(component))
}
func runWebhook(ctx *cli.Context) error {
printInfo("security-profiles-operator-webhook")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
port := ctx.Int("port")
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-webhook-lock",
Port: port,
}
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster webhook")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := profilerecording1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilerecording API to scheme")
}
setupLog.Info("registering webhooks")
hookserver := mgr.GetWebhookServer()
binding.RegisterWebhook(hookserver, mgr.GetClient())
recording.RegisterWebhook(hookserver, mgr.GetClient())
sigHandler := ctrl.SetupSignalHandler()
setupLog.Info("starting webhook")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
return nil
}
func setupEnabledControllers(
ctx context.Context,
enabledControllers []controller.Controller,
mgr ctrl.Manager,
met *metrics.Metrics,
) error {
for _, enableCtrl := range enabledControllers {
if enableCtrl.SchemeBuilder() != nil {
if err := enableCtrl.SchemeBuilder().AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add core operator APIs to scheme")
}
}
if err := enableCtrl.Setup(ctx, mgr, met); err != nil {
return errors.Wrapf(err, "setup %s controller", enableCtrl.Name())
}
if met != nil {
if err := mgr.AddHealthzCheck(enableCtrl.Name(), enableCtrl.Healthz); err != nil {
return errors.Wrap(err, "add readiness check to controller")
}
}
}
return nil
} | app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " + | random_line_split |
main.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/pkg/errors"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/urfave/cli/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
profilebindingv1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilebinding/v1alpha1"
profilerecording1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilerecording/v1alpha1"
seccompprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/seccompprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp()
app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " +
"to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2)
// Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
func | (ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err := secprofnodestatusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add per-node Status API to scheme")
}
if err := setupEnabledControllers(ctx.Context, enabledControllers, mgr, met); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting daemon")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "SPOd error")
}
setupLog.Info("ending daemon")
return nil
}
func runLogEnricher(ctx *cli.Context) error {
const component = "log-enricher"
printInfo(component)
return enricher.New(ctrl.Log.WithName(component)).Run()
}
func runNonRootEnabler(ctx *cli.Context) error {
const component = "non-root-enabler"
printInfo(component)
return nonrootenabler.New().Run(ctrl.Log.WithName(component))
}
func runWebhook(ctx *cli.Context) error {
printInfo("security-profiles-operator-webhook")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
port := ctx.Int("port")
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-webhook-lock",
Port: port,
}
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster webhook")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := profilerecording1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilerecording API to scheme")
}
setupLog.Info("registering webhooks")
hookserver := mgr.GetWebhookServer()
binding.RegisterWebhook(hookserver, mgr.GetClient())
recording.RegisterWebhook(hookserver, mgr.GetClient())
sigHandler := ctrl.SetupSignalHandler()
setupLog.Info("starting webhook")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
return nil
}
func setupEnabledControllers(
ctx context.Context,
enabledControllers []controller.Controller,
mgr ctrl.Manager,
met *metrics.Metrics,
) error {
for _, enableCtrl := range enabledControllers {
if enableCtrl.SchemeBuilder() != nil {
if err := enableCtrl.SchemeBuilder().AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add core operator APIs to scheme")
}
}
if err := enableCtrl.Setup(ctx, mgr, met); err != nil {
return errors.Wrapf(err, "setup %s controller", enableCtrl.Name())
}
if met != nil {
if err := mgr.AddHealthzCheck(enableCtrl.Name(), enableCtrl.Healthz); err != nil {
return errors.Wrap(err, "add readiness check to controller")
}
}
}
return nil
}
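// --- Illustrative sketch; not part of the operator source file above ---------
// setupEnabledControllers only relies on four methods of the internal
// controller.Controller interface: Name, SchemeBuilder, Setup and Healthz.
// The hypothetical no-op implementation below is shaped after those calls;
// the real interface lives in internal/pkg/controller and may differ:

package controllersketch // hypothetical demo package, not part of main.go

import (
	"context"
	"net/http"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

type noopController struct{}

// Name is used for error wrapping and as the healthz check name.
func (noopController) Name() string { return "noop" }

// SchemeBuilder may return nil when the controller adds no new API types,
// which setupEnabledControllers explicitly tolerates.
func (noopController) SchemeBuilder() *scheme.Builder { return nil }

// Setup would normally wire a reconciler into the manager; the metrics
// argument is typed loosely here because metrics.Metrics is internal.
func (noopController) Setup(_ context.Context, _ ctrl.Manager, _ interface{}) error {
	return nil
}

// Healthz matches controller-runtime's healthz.Checker signature, as required
// by mgr.AddHealthzCheck in the loop above.
func (noopController) Healthz(_ *http.Request) error { return nil }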
| getEnabledControllers | identifier_name |
main.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/pkg/errors"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/urfave/cli/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
profilebindingv1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilebinding/v1alpha1"
profilerecording1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilerecording/v1alpha1"
seccompprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/seccompprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp()
app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " +
"to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
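// --- Illustrative sketch; not part of the operator source file above ---------
// The urfave/cli app assembled in main() exposes one subcommand per mode, so
// the binary would typically be started as, for example,
// "security-profiles-operator manager", "... daemon --with-selinux",
// "... webhook --port 9443" or "... version --json" (binary name assumed from
// config.OperatorName; flags taken from the definitions above). In tests, the
// same wiring can be exercised by handing app.Run an explicit argv slice
// instead of os.Args:

package cliusage // hypothetical demo package, not part of main.go

import "github.com/urfave/cli/v2"

// runVersion drives a minimal app the same way main() drives the real one.
func runVersion() error {
	app := cli.NewApp()
	app.Name = "security-profiles-operator"
	app.Commands = cli.Commands{
		&cli.Command{
			Name:    "version",
			Aliases: []string{"v"},
			Flags:   []cli.Flag{&cli.BoolFlag{Name: "json", Aliases: []string{"j"}}},
			Action: func(c *cli.Context) error {
				_ = c.Bool("json") // the real command prints version info here
				return nil
			},
		},
	}
	return app.Run([]string{"security-profiles-operator", "version", "--json"})
}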
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) |
func getEnabledControllers(ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err := secprofnodestatusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add per-node Status API to scheme")
}
if err := setupEnabledControllers(ctx.Context, enabledControllers, mgr, met); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting daemon")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "SPOd error")
}
setupLog.Info("ending daemon")
return nil
}
func runLogEnricher(ctx *cli.Context) error {
const component = "log-enricher"
printInfo(component)
return enricher.New(ctrl.Log.WithName(component)).Run()
}
func runNonRootEnabler(ctx *cli.Context) error {
const component = "non-root-enabler"
printInfo(component)
return nonrootenabler.New().Run(ctrl.Log.WithName(component))
}
func runWebhook(ctx *cli.Context) error {
printInfo("security-profiles-operator-webhook")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
port := ctx.Int("port")
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-webhook-lock",
Port: port,
}
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster webhook")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := profilerecording1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilerecording API to scheme")
}
setupLog.Info("registering webhooks")
hookserver := mgr.GetWebhookServer()
binding.RegisterWebhook(hookserver, mgr.GetClient())
recording.RegisterWebhook(hookserver, mgr.GetClient())
sigHandler := ctrl.SetupSignalHandler()
setupLog.Info("starting webhook")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
return nil
}
func setupEnabledControllers(
ctx context.Context,
enabledControllers []controller.Controller,
mgr ctrl.Manager,
met *metrics.Metrics,
) error {
for _, enableCtrl := range enabledControllers {
if enableCtrl.SchemeBuilder() != nil {
if err := enableCtrl.SchemeBuilder().AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add core operator APIs to scheme")
}
}
if err := enableCtrl.Setup(ctx, mgr, met); err != nil {
return errors.Wrapf(err, "setup %s controller", enableCtrl.Name())
}
if met != nil {
if err := mgr.AddHealthzCheck(enableCtrl.Name(), enableCtrl.Healthz); err != nil {
return errors.Wrap(err, "add readiness check to controller")
}
}
}
return nil
}
| {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g. ns1,ns2)
// Note that this is not intended to be used for excluding namespaces; this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
} | identifier_body |
main.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/pkg/errors"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/urfave/cli/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/manager"
profilebindingv1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilebinding/v1alpha1"
profilerecording1alpha1 "sigs.k8s.io/security-profiles-operator/api/profilerecording/v1alpha1"
seccompprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/seccompprofile/v1alpha1"
secprofnodestatusv1alpha1 "sigs.k8s.io/security-profiles-operator/api/secprofnodestatus/v1alpha1"
selinuxprofilev1alpha1 "sigs.k8s.io/security-profiles-operator/api/selinuxprofile/v1alpha1"
"sigs.k8s.io/security-profiles-operator/internal/pkg/config"
"sigs.k8s.io/security-profiles-operator/internal/pkg/controller"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/enricher"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/metrics"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/profilerecorder"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/seccompprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/selinuxprofile"
"sigs.k8s.io/security-profiles-operator/internal/pkg/daemon/server"
nodestatus "sigs.k8s.io/security-profiles-operator/internal/pkg/manager/nodestatus"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/spod"
"sigs.k8s.io/security-profiles-operator/internal/pkg/manager/workloadannotator"
"sigs.k8s.io/security-profiles-operator/internal/pkg/nonrootenabler"
"sigs.k8s.io/security-profiles-operator/internal/pkg/version"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/binding"
"sigs.k8s.io/security-profiles-operator/internal/pkg/webhooks/recording"
)
const (
jsonFlag string = "json"
selinuxFlag string = "with-selinux"
defaultWebhookPort int = 9443
)
var (
sync = time.Second * 30
setupLog = ctrl.Log.WithName("setup")
)
func main() {
ctrl.SetLogger(klogr.New())
app := cli.NewApp()
app.Name = config.OperatorName
app.Usage = "Kubernetes Security Profiles Operator"
app.Description = "The Security Profiles Operator makes it easier for cluster admins " +
"to manage their seccomp or AppArmor profiles and apply them to Kubernetes' workloads."
app.Version = version.Get().Version
app.Commands = cli.Commands{
&cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "display detailed version information",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: jsonFlag,
Aliases: []string{"j"},
Usage: "print JSON instead of text",
},
},
Action: func(c *cli.Context) error {
v := version.Get()
res := v.String()
if c.Bool(jsonFlag) {
j, err := v.JSONString()
if err != nil {
return errors.Wrap(err, "unable to generate JSON from version info")
}
res = j
}
print(res)
return nil
},
},
&cli.Command{
Name: "manager",
Aliases: []string{"m"},
Usage: "run the manager",
Action: runManager,
},
&cli.Command{
Name: "daemon",
Aliases: []string{"d"},
Usage: "run the daemon",
Action: runDaemon,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: selinuxFlag,
Usage: "Listen for SELinux API resources",
Value: false,
},
},
},
&cli.Command{
Name: "webhook",
Aliases: []string{"w"},
Usage: "run the webhook",
Action: runWebhook,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "port",
Aliases: []string{"p"},
Value: defaultWebhookPort,
Usage: "the port on which to expose the webhook service (default 9443)",
},
},
},
&cli.Command{
Name: "non-root-enabler",
Usage: "run the non root enabler",
Action: runNonRootEnabler,
},
&cli.Command{
Name: "log-enricher",
Aliases: []string{"l"},
Usage: "run the audit's log enricher",
Action: runLogEnricher,
},
}
if err := app.Run(os.Args); err != nil {
setupLog.Error(err, "running security-profiles-operator")
os.Exit(1)
}
}
func printInfo(component string) {
setupLog.Info(
fmt.Sprintf("starting component: %s", component),
version.Get().AsKeyValues()...,
)
}
func runManager(ctx *cli.Context) error {
printInfo("security-profiles-operator")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-lock",
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster manager")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := selinuxprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add selinuxprofile API to scheme")
}
if err := monitoringv1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add ServiceMonitor API to scheme")
}
if err := setupEnabledControllers(ctx.Context, []controller.Controller{
nodestatus.NewController(),
spod.NewController(),
workloadannotator.NewController(),
}, mgr, nil); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting manager")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
setupLog.Info("ending manager")
return nil
}
func setControllerOptionsForNamespaces(opts *ctrl.Options) {
namespace := os.Getenv(config.RestrictNamespaceEnvKey)
// listen globally
if namespace == "" {
opts.Namespace = namespace
return
}
// ensure we listen to our own namespace
if !strings.Contains(namespace, config.GetOperatorNamespace()) {
namespace = namespace + "," + config.GetOperatorNamespace()
}
namespaceList := strings.Split(namespace, ",")
// Add support for MultiNamespace set in WATCH_NAMESPACE (e.g. ns1,ns2)
// Note that this is not intended to be used for excluding namespaces; this is better done via a Predicate
// Also note that you may face performance issues when using this with a high number of namespaces.
// More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder
// Adding "" adds cluster namespaced resources
if strings.Contains(namespace, ",") {
opts.NewCache = cache.MultiNamespacedCacheBuilder(namespaceList)
} else {
// listen to a specific namespace only
opts.Namespace = namespace
}
}
func getEnabledControllers(ctx *cli.Context) []controller.Controller {
controllers := []controller.Controller{
seccompprofile.NewController(),
profilerecorder.NewController(),
}
if ctx.Bool(selinuxFlag) {
controllers = append(controllers, selinuxprofile.NewController())
}
return controllers
}
func runDaemon(ctx *cli.Context) error {
// security-profiles-operator-daemon
printInfo("spod")
enabledControllers := getEnabledControllers(ctx)
if len(enabledControllers) == 0 {
return errors.New("no controllers enabled")
}
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
sigHandler := ctrl.SetupSignalHandler()
ctrlOpts := ctrl.Options{
SyncPeriod: &sync,
HealthProbeBindAddress: fmt.Sprintf(":%d", config.HealthProbePort),
}
setControllerOptionsForNamespaces(&ctrlOpts)
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create manager")
}
// Setup metrics
met := metrics.New()
if err := met.Register(); err != nil {
return errors.Wrap(err, "register metrics")
}
if err := mgr.AddMetricsExtraHandler(metrics.HandlerPath, met.Handler()); err != nil {
return errors.Wrap(err, "add metrics extra handler")
}
// Setup the GRPC server
if err := server.New(ctrl.Log.WithName("server"), met).Start(); err != nil {
return errors.Wrap(err, "start GRPC server")
}
// This API provides status which is used by both seccomp and selinux
if err := secprofnodestatusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add per-node Status API to scheme")
}
if err := setupEnabledControllers(ctx.Context, enabledControllers, mgr, met); err != nil {
return errors.Wrap(err, "enable controllers")
}
setupLog.Info("starting daemon")
if err := mgr.Start(sigHandler); err != nil |
setupLog.Info("ending daemon")
return nil
}
func runLogEnricher(ctx *cli.Context) error {
const component = "log-enricher"
printInfo(component)
return enricher.New(ctrl.Log.WithName(component)).Run()
}
func runNonRootEnabler(ctx *cli.Context) error {
const component = "non-root-enabler"
printInfo(component)
return nonrootenabler.New().Run(ctrl.Log.WithName(component))
}
func runWebhook(ctx *cli.Context) error {
printInfo("security-profiles-operator-webhook")
cfg, err := ctrl.GetConfig()
if err != nil {
return errors.Wrap(err, "get config")
}
port := ctx.Int("port")
ctrlOpts := manager.Options{
SyncPeriod: &sync,
LeaderElection: true,
LeaderElectionID: "security-profiles-operator-webhook-lock",
Port: port,
}
mgr, err := ctrl.NewManager(cfg, ctrlOpts)
if err != nil {
return errors.Wrap(err, "create cluster webhook")
}
if err := profilebindingv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilebinding API to scheme")
}
if err := seccompprofilev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add seccompprofile API to scheme")
}
if err := profilerecording1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add profilerecording API to scheme")
}
setupLog.Info("registering webhooks")
hookserver := mgr.GetWebhookServer()
binding.RegisterWebhook(hookserver, mgr.GetClient())
recording.RegisterWebhook(hookserver, mgr.GetClient())
sigHandler := ctrl.SetupSignalHandler()
setupLog.Info("starting webhook")
if err := mgr.Start(sigHandler); err != nil {
return errors.Wrap(err, "controller manager error")
}
return nil
}
func setupEnabledControllers(
ctx context.Context,
enabledControllers []controller.Controller,
mgr ctrl.Manager,
met *metrics.Metrics,
) error {
for _, enableCtrl := range enabledControllers {
if enableCtrl.SchemeBuilder() != nil {
if err := enableCtrl.SchemeBuilder().AddToScheme(mgr.GetScheme()); err != nil {
return errors.Wrap(err, "add core operator APIs to scheme")
}
}
if err := enableCtrl.Setup(ctx, mgr, met); err != nil {
return errors.Wrapf(err, "setup %s controller", enableCtrl.Name())
}
if met != nil {
if err := mgr.AddHealthzCheck(enableCtrl.Name(), enableCtrl.Healthz); err != nil {
return errors.Wrap(err, "add readiness check to controller")
}
}
}
return nil
}
| {
return errors.Wrap(err, "SPOd error")
} | conditional_block |