file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
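Each row below is one fill-in-the-middle (FIM) example: `prefix` and `suffix` hold the code before and after a masked span (rendered inline as `|`), `middle` holds the masked text, and `fim_type` labels the kind of span that was masked (the values visible below include `identifier_name`, `conditional_block` and `random_line_split`). A minimal sketch of stitching a row back into its original source, assuming rows are available as plain Python dicts with these column names (the loading step and the example values are assumptions, not taken from the data):

```python
def reconstruct_file(row):
    """Rebuild the original source text of one FIM row (prefix + masked middle + suffix)."""
    return row["prefix"] + row["middle"] + row["suffix"]


# Hypothetical row, shaped like the records below.
example = {
    "file_name": "example.py",
    "prefix": "def ",
    "middle": "append_to_file",
    "suffix": "(file_name, data):\n    pass\n",
    "fim_type": "identifier_name",
}

print(reconstruct_file(example))
# -> def append_to_file(file_name, data):
#        pass
```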
gen_mike_input_rf_linux.py | #!/home/uwcc-admin/curw_mike_data_handler/venv/bin/python3
"only she bang, root dir, output dir and filename are different from generic one"
import pymysql
from datetime import datetime, timedelta
import traceback
import json
import os
import sys
import getopt
import pandas as pd
import numpy as np
DATE_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
from db_adapter.constants import set_db_config_file_path
from db_adapter.constants import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def | (file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean value: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
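# Illustrative example (added for clarity, not part of the original script):
# >>> list_of_lists_to_df_first_row_as_columns([['time', 100], ['2019-06-05 00:00:00', 1.5]])
# builds a DataFrame with columns ['time', 100] and a single data row.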
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
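# Note (added for clarity): fillna(m) aligns on the row index, so each NaN is replaced by
# that row's mean across the other columns, e.g. a row [2.0, NaN, 4.0] becomes [2.0, 3.0, 4.0].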
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for result in results:
grid_id = result.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = result.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
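# Note (added for clarity): grid_id is expected to consist of underscore-separated parts,
# e.g. "<source>_<obs_station_id>_...", so grid_id.split("_")[1] above yields the observation
# station id that keys the returned mapping.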
def prepare_mike_rf_input(start, end, coefficients):
try:
#### process station-based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)  # DataFrame.round() returns a new frame; keep the rounded result
return mike_input
except Exception:
traceback.print_exc()
finally:
destroy_Pool(pool)
def usage():
usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
-s --start_time Mike rainfall timeseries start time (e.g.: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
-e --end_time Mike rainfall timeseries end time (e.g.: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after today.
"""
print(usageText)
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "hs:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir = os.path.join(OUTPUT_DIRECTORY, (datetime.utcnow() + timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d_%H-00-00'))
if file_name is None:
file_name = 'mike_rf.txt'  # fixed default file name
mike_rf_file_path = os.path.join(output_dir, file_name)
if not os.path.isfile(mike_rf_file_path):
makedir_if_not_exist_given_filepath(mike_rf_file_path)
print("{} start preparing mike rainfall input".format(datetime.now()))
coefficients = pd.read_csv(os.path.join('inputs', 'params', 'sb_rf_coefficients.csv'), delimiter=',')
mike_rainfall = prepare_mike_rf_input(start=start_time, end=end_time, coefficients=coefficients)
mike_rainfall.to_csv(mike_rf_file_path, header=True, index=True)
print("{} completed preparing mike rainfall input".format(datetime.now()))
print("Mike input rainfall file is available at {}".format(mike_rf_file_path))
else:
print('Mike rainfall input file already exists at: ', mike_rf_file_path)
except Exception:
traceback.print_exc()
| append_to_file | identifier_name |
debug.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
#![allow(unused_macros)]
/// Low-level functions for testing.
// Macro to parse a `Borrow<str>` to an `edn::Value` and assert the given `edn::Value` `matches`
// against it.
//
// This is a macro only to give nice line numbers when tests fail.
#[macro_export]
macro_rules! assert_matches {
( $input: expr, $expected: expr ) => {{
// Failure to parse the expected pattern is a coding error, so we unwrap.
let pattern_value = edn::parse::value($expected.borrow())
.expect(format!("to be able to parse expected {}", $expected).as_str())
.without_spans();
let input_value = $input.to_edn();
assert!(
input_value.matches(&pattern_value),
"Expected value:\n{}\nto match pattern:\n{}\n",
input_value.to_pretty(120).unwrap(),
pattern_value.to_pretty(120).unwrap()
);
}};
}
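// Illustrative usage of assert_matches! (added for clarity; the datom values are hypothetical):
//
//     assert_matches!(conn.last_transaction(),
//                     "[[100 :test/ident 1 ?tx true]]");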
// Transact $input against the given $conn, expecting success or a `Result<TxReport, String>`.
//
// This unwraps safely and makes asserting errors pleasant.
#[macro_export]
macro_rules! assert_transact {
( $conn: expr, $input: expr, $expected: expr ) => {{
trace!("assert_transact: {}", $input);
let result = $conn.transact($input).map_err(|e| e.to_string());
assert_eq!(result, $expected.map_err(|e| e.to_string()));
}};
( $conn: expr, $input: expr ) => {{
trace!("assert_transact: {}", $input);
let result = $conn.transact($input);
assert!(
result.is_ok(),
"Expected Ok(_), got `{}`",
result.unwrap_err()
);
result.unwrap()
}};
}
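// Illustrative usage of assert_transact! (added for clarity; the transaction string is hypothetical):
//
//     let _report = assert_transact!(conn, "[[:db/add \"t\" :db/ident :test/ident]]");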
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::io::Write;
use itertools::Itertools;
use rusqlite;
use rusqlite::types::ToSql;
use rusqlite::TransactionBehavior;
use tabwriter::TabWriter;
use crate::bootstrap;
use crate::db::*;
use crate::db::{read_attribute_map, read_ident_map};
use crate::entids;
use db_traits::errors::Result;
use edn;
use core_traits::{Entid, TypedValue, ValueType};
use crate::internal_types::TermWithTempIds;
use crate::schema::SchemaBuilding;
use crate::tx::{transact, transact_terms};
use crate::types::*;
use crate::watcher::NullWatcher;
use edn::entities::{EntidOrIdent, TempId};
use edn::InternSet;
use mentat_core::{HasSchema, SQLValueType, TxReport};
/// Represents a *datom* (assertion) in the store.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct Datom {
// TODO: generalize this.
pub e: EntidOrIdent,
pub a: EntidOrIdent,
pub v: edn::Value,
pub tx: i64,
pub added: Option<bool>,
}
/// Represents a set of datoms (assertions) in the store.
///
/// To make comparison easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx)`, where `value_type_tag` is an internal
/// value that is not exposed but is deterministic.
pub struct Datoms(pub Vec<Datom>);
/// Represents an ordered sequence of transactions in the store.
///
/// To make comparison easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx, added)`, where `value_type_tag` is an
/// internal value that is not exposed but is deterministic, and `added` is ordered such that
/// retracted assertions appear before added assertions.
pub struct Transactions(pub Vec<Datoms>);
/// Represents the fulltext values in the store.
pub struct FulltextValues(pub Vec<(i64, String)>);
impl Datom {
pub fn to_edn(&self) -> edn::Value {
let f = |entid: &EntidOrIdent| -> edn::Value {
match *entid {
EntidOrIdent::Entid(ref y) => edn::Value::Integer(*y),
EntidOrIdent::Ident(ref y) => edn::Value::Keyword(y.clone()),
}
};
let mut v = vec![f(&self.e), f(&self.a), self.v.clone()];
if let Some(added) = self.added {
v.push(edn::Value::Integer(self.tx));
v.push(edn::Value::Boolean(added));
}
edn::Value::Vector(v)
}
}
impl Datoms {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl Transactions {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl FulltextValues {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector(
(&self.0)
.iter()
.map(|&(x, ref y)| {
edn::Value::Vector(vec![edn::Value::Integer(x), edn::Value::Text(y.clone())])
})
.collect(),
)
}
}
/// Turn TypedValue::Ref into TypedValue::Keyword when it is possible.
trait ToIdent {
fn map_ident(self, schema: &Schema) -> Self;
}
impl ToIdent for TypedValue {
fn map_ident(self, schema: &Schema) -> Self {
if let TypedValue::Ref(e) = self {
schema
.get_ident(e)
.cloned()
.map(|i| i.into())
.unwrap_or(TypedValue::Ref(e))
} else {
self
}
}
}
/// Convert a numeric entid to an ident `Entid` if possible, otherwise a numeric `Entid`.
pub fn to_entid(schema: &Schema, entid: i64) -> EntidOrIdent {
schema
.get_ident(entid)
.map_or(EntidOrIdent::Entid(entid), |ident| {
EntidOrIdent::Ident(ident.clone())
})
}
// /// Convert a symbolic ident to an ident `Entid` if possible, otherwise a numeric `Entid`.
// pub fn to_ident(schema: &Schema, entid: i64) -> Entid {
// schema.get_ident(entid).map_or(Entid::Entid(entid), |ident| Entid::Ident(ident.clone()))
// }
/// Return the set of datoms in the store, ordered by (e, a, v, tx), but not including any datoms of
/// the form [... :db/txInstant ...].
pub fn datoms<S: Borrow<Schema>>(conn: &rusqlite::Connection, schema: &S) -> Result<Datoms> {
datoms_after(conn, schema, bootstrap::TX0 - 1)
}
/// Return the set of datoms in the store with transaction ID strictly greater than the given `tx`,
/// ordered by (e, a, v, tx).
///
/// The datom set returned does not include any datoms of the form [... :db/txInstant ...].
pub fn datoms_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Datoms> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx FROM datoms WHERE tx > ? ORDER BY e ASC, a ASC, value_type_tag ASC, v ASC, tx ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
if a == entids::DB_TX_INSTANT {
return Ok(None);
}
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
Ok(Some(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: None,
}))
})?
.collect();
Ok(Datoms(r?.into_iter().filter_map(|x| x).collect()))
}
/// Return the sequence of transactions in the store with transaction ID strictly greater than the
/// given `tx`, ordered by (tx, e, a, v).
///
/// Each transaction returned includes the [(transaction-tx) :db/txInstant ...] datom.
pub fn transactions_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Transactions> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx, added FROM transactions WHERE tx > ? ORDER BY tx ASC, e ASC, a ASC, value_type_tag ASC, v ASC, added ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else | ;
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
let added: bool = row.get(5)?;
Ok(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: Some(added),
})
})?
.collect();
// Group by tx.
let r: Vec<Datoms> = r?
.into_iter()
.group_by(|x| x.tx)
.into_iter()
.map(|(_key, group)| Datoms(group.collect()))
.collect();
Ok(Transactions(r))
}
/// Return the set of fulltext values in the store, ordered by rowid.
pub fn fulltext_values(conn: &rusqlite::Connection) -> Result<FulltextValues> {
let mut stmt: rusqlite::Statement =
conn.prepare("SELECT rowid, text FROM fulltext_values ORDER BY rowid")?;
let r: Result<Vec<_>> = stmt
.query_and_then([], |row| {
let rowid: i64 = row.get(0)?;
let text: String = row.get(1)?;
Ok((rowid, text))
})?
.collect();
r.map(FulltextValues)
}
/// Execute the given `sql` query with the given `params` and format the results as a
/// tab-and-newline formatted string suitable for debug printing.
///
/// The query is printed followed by a newline, then the returned columns followed by a newline, and
/// then the data rows and columns. All columns are aligned.
pub fn dump_sql_query(
conn: &rusqlite::Connection,
sql: &str,
params: &[&dyn ToSql],
) -> Result<String> {
let mut stmt: rusqlite::Statement = conn.prepare(sql)?;
let mut tw = TabWriter::new(Vec::new()).padding(2);
writeln!(&mut tw, "{}", sql).unwrap();
for column_name in stmt.column_names() {
write!(&mut tw, "{}\t", column_name).unwrap();
}
writeln!(&mut tw).unwrap();
let r: Result<Vec<_>> = stmt
.query_and_then(params, |row| {
for i in 0..row.as_ref().column_count() {
let value: rusqlite::types::Value = row.get(i)?;
write!(&mut tw, "{:?}\t", value).unwrap();
}
writeln!(&mut tw).unwrap();
Ok(())
})?
.collect();
r?;
let dump = String::from_utf8(tw.into_inner().unwrap()).unwrap();
Ok(dump)
}
// A connection that doesn't try to be clever about possibly sharing its `Schema`. Compare to
// `mentat::Conn`.
pub struct TestConn {
pub sqlite: rusqlite::Connection,
pub partition_map: PartitionMap,
pub schema: Schema,
}
impl TestConn {
fn assert_materialized_views(&self) {
let materialized_ident_map = read_ident_map(&self.sqlite).expect("ident map");
let materialized_attribute_map = read_attribute_map(&self.sqlite).expect("schema map");
let materialized_schema = Schema::from_ident_map_and_attribute_map(
materialized_ident_map,
materialized_attribute_map,
)
.expect("schema");
assert_eq!(materialized_schema, self.schema);
}
pub fn transact<I>(&mut self, transaction: I) -> Result<TxReport>
where
I: Borrow<str>,
{
// Failure to parse the transaction is a coding error, so we unwrap.
let entities = edn::parse::entities(transaction.borrow()).unwrap_or_else(|_| {
panic!("to be able to parse {} into entities", transaction.borrow())
});
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
entities,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn transact_simple_terms<I>(
&mut self,
terms: I,
tempid_set: InternSet<TempId>,
) -> Result<TxReport>
where
I: IntoIterator<Item = TermWithTempIds>,
{
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact_terms(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
terms,
tempid_set,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn last_tx_id(&self) -> Entid {
self.partition_map
.get(&":db.part/tx".to_string())
.unwrap()
.next_entid()
- 1
}
pub fn last_transaction(&self) -> Datoms {
transactions_after(&self.sqlite, &self.schema, self.last_tx_id() - 1)
.expect("last_transaction")
.0
.pop()
.unwrap()
}
pub fn transactions(&self) -> Transactions {
transactions_after(&self.sqlite, &self.schema, bootstrap::TX0).expect("transactions")
}
pub fn datoms(&self) -> Datoms {
datoms_after(&self.sqlite, &self.schema, bootstrap::TX0).expect("datoms")
}
pub fn fulltext_values(&self) -> FulltextValues {
fulltext_values(&self.sqlite).expect("fulltext_values")
}
pub fn with_sqlite(mut conn: rusqlite::Connection) -> TestConn {
let db = ensure_current_version(&mut conn).unwrap();
// Does not include :db/txInstant.
let datoms = datoms_after(&conn, &db.schema, 0).unwrap();
assert_eq!(datoms.0.len(), 94);
// Includes :db/txInstant.
let transactions = transactions_after(&conn, &db.schema, 0).unwrap();
assert_eq!(transactions.0.len(), 1);
assert_eq!(transactions.0[0].0.len(), 95);
let mut parts = db.partition_map;
// Add a fake partition to allow tests to do things like
// [:db/add 111 :foo/bar 222]
{
let fake_partition = Partition::new(100, 2000, 1000, true);
parts.insert(":db.part/fake".into(), fake_partition);
}
let test_conn = TestConn {
sqlite: conn,
partition_map: parts,
schema: db.schema,
};
// Verify that we've created the materialized views during bootstrapping.
test_conn.assert_materialized_views();
test_conn
}
pub fn sanitized_partition_map(&mut self) {
self.partition_map.remove(":db.part/fake");
}
}
impl Default for TestConn {
fn default() -> TestConn {
TestConn::with_sqlite(new_connection("").expect("Couldn't open in-memory db"))
}
}
pub struct TempIds(edn::Value);
impl TempIds {
pub fn to_edn(&self) -> edn::Value {
self.0.clone()
}
}
pub fn tempids(report: &TxReport) -> TempIds {
let mut map: BTreeMap<edn::Value, edn::Value> = BTreeMap::default();
for (tempid, &entid) in report.tempids.iter() {
map.insert(edn::Value::Text(tempid.clone()), edn::Value::Integer(entid));
}
TempIds(edn::Value::Map(map))
}
| {
ValueType::Long.value_type_tag()
} | conditional_block |
debug.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
#![allow(unused_macros)]
/// Low-level functions for testing.
// Macro to parse a `Borrow<str>` to an `edn::Value` and assert the given `edn::Value` `matches`
// against it.
//
// This is a macro only to give nice line numbers when tests fail.
#[macro_export]
macro_rules! assert_matches {
( $input: expr, $expected: expr ) => {{
// Failure to parse the expected pattern is a coding error, so we unwrap.
let pattern_value = edn::parse::value($expected.borrow())
.expect(format!("to be able to parse expected {}", $expected).as_str())
.without_spans();
let input_value = $input.to_edn();
assert!(
input_value.matches(&pattern_value),
"Expected value:\n{}\nto match pattern:\n{}\n",
input_value.to_pretty(120).unwrap(),
pattern_value.to_pretty(120).unwrap()
);
}};
}
// Transact $input against the given $conn, expecting success or a `Result<TxReport, String>`.
//
// This unwraps safely and makes asserting errors pleasant.
#[macro_export]
macro_rules! assert_transact {
( $conn: expr, $input: expr, $expected: expr ) => {{
trace!("assert_transact: {}", $input);
let result = $conn.transact($input).map_err(|e| e.to_string());
assert_eq!(result, $expected.map_err(|e| e.to_string()));
}};
( $conn: expr, $input: expr ) => {{
trace!("assert_transact: {}", $input);
let result = $conn.transact($input);
assert!(
result.is_ok(),
"Expected Ok(_), got `{}`",
result.unwrap_err()
);
result.unwrap()
}};
}
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::io::Write;
use itertools::Itertools;
use rusqlite;
use rusqlite::types::ToSql;
use rusqlite::TransactionBehavior;
use tabwriter::TabWriter;
use crate::bootstrap;
use crate::db::*;
use crate::db::{read_attribute_map, read_ident_map};
use crate::entids;
use db_traits::errors::Result;
use edn;
use core_traits::{Entid, TypedValue, ValueType};
use crate::internal_types::TermWithTempIds;
use crate::schema::SchemaBuilding;
use crate::tx::{transact, transact_terms};
use crate::types::*;
use crate::watcher::NullWatcher;
use edn::entities::{EntidOrIdent, TempId};
use edn::InternSet;
use mentat_core::{HasSchema, SQLValueType, TxReport};
/// Represents a *datom* (assertion) in the store.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct Datom {
// TODO: generalize this.
pub e: EntidOrIdent,
pub a: EntidOrIdent,
pub v: edn::Value,
pub tx: i64,
pub added: Option<bool>,
}
/// Represents a set of datoms (assertions) in the store.
///
/// To make comparison easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx)`, where `value_type_tag` is an internal
/// value that is not exposed but is deterministic.
pub struct Datoms(pub Vec<Datom>);
/// Represents an ordered sequence of transactions in the store.
///
/// To make comparison easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx, added)`, where `value_type_tag` is an
/// internal value that is not exposed but is deterministic, and `added` is ordered such that
/// retracted assertions appear before added assertions.
pub struct Transactions(pub Vec<Datoms>);
/// Represents the fulltext values in the store.
pub struct FulltextValues(pub Vec<(i64, String)>);
impl Datom {
pub fn to_edn(&self) -> edn::Value {
let f = |entid: &EntidOrIdent| -> edn::Value {
match *entid {
EntidOrIdent::Entid(ref y) => edn::Value::Integer(*y),
EntidOrIdent::Ident(ref y) => edn::Value::Keyword(y.clone()),
}
};
let mut v = vec![f(&self.e), f(&self.a), self.v.clone()];
if let Some(added) = self.added {
v.push(edn::Value::Integer(self.tx));
v.push(edn::Value::Boolean(added));
}
edn::Value::Vector(v)
}
}
impl Datoms {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl Transactions {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl FulltextValues {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector(
(&self.0)
.iter()
.map(|&(x, ref y)| {
edn::Value::Vector(vec![edn::Value::Integer(x), edn::Value::Text(y.clone())])
})
.collect(),
)
}
}
/// Turn TypedValue::Ref into TypedValue::Keyword when it is possible.
trait ToIdent {
fn map_ident(self, schema: &Schema) -> Self;
}
impl ToIdent for TypedValue {
fn map_ident(self, schema: &Schema) -> Self {
if let TypedValue::Ref(e) = self {
schema
.get_ident(e)
.cloned()
.map(|i| i.into())
.unwrap_or(TypedValue::Ref(e))
} else {
self
}
}
}
/// Convert a numeric entid to an ident `Entid` if possible, otherwise a numeric `Entid`.
pub fn to_entid(schema: &Schema, entid: i64) -> EntidOrIdent {
schema
.get_ident(entid)
.map_or(EntidOrIdent::Entid(entid), |ident| {
EntidOrIdent::Ident(ident.clone())
})
}
// /// Convert a symbolic ident to an ident `Entid` if possible, otherwise a numeric `Entid`.
// pub fn to_ident(schema: &Schema, entid: i64) -> Entid {
// schema.get_ident(entid).map_or(Entid::Entid(entid), |ident| Entid::Ident(ident.clone()))
// }
/// Return the set of datoms in the store, ordered by (e, a, v, tx), but not including any datoms of
/// the form [... :db/txInstant ...].
pub fn datoms<S: Borrow<Schema>>(conn: &rusqlite::Connection, schema: &S) -> Result<Datoms> {
datoms_after(conn, schema, bootstrap::TX0 - 1)
}
/// Return the set of datoms in the store with transaction ID strictly greater than the given `tx`,
/// ordered by (e, a, v, tx).
///
/// The datom set returned does not include any datoms of the form [... :db/txInstant ...].
pub fn datoms_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Datoms> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx FROM datoms WHERE tx > ? ORDER BY e ASC, a ASC, value_type_tag ASC, v ASC, tx ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
if a == entids::DB_TX_INSTANT {
return Ok(None);
}
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
Ok(Some(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: None,
}))
})?
.collect();
Ok(Datoms(r?.into_iter().filter_map(|x| x).collect()))
}
/// Return the sequence of transactions in the store with transaction ID strictly greater than the
/// given `tx`, ordered by (tx, e, a, v).
///
/// Each transaction returned includes the [(transaction-tx) :db/txInstant ...] datom.
pub fn transactions_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Transactions> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx, added FROM transactions WHERE tx > ? ORDER BY tx ASC, e ASC, a ASC, value_type_tag ASC, v ASC, added ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
let added: bool = row.get(5)?;
Ok(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: Some(added),
})
})?
.collect();
// Group by tx.
let r: Vec<Datoms> = r?
.into_iter()
.group_by(|x| x.tx)
.into_iter()
.map(|(_key, group)| Datoms(group.collect()))
.collect();
Ok(Transactions(r))
}
/// Return the set of fulltext values in the store, ordered by rowid.
pub fn fulltext_values(conn: &rusqlite::Connection) -> Result<FulltextValues> {
let mut stmt: rusqlite::Statement =
conn.prepare("SELECT rowid, text FROM fulltext_values ORDER BY rowid")?;
let r: Result<Vec<_>> = stmt
.query_and_then([], |row| {
let rowid: i64 = row.get(0)?;
let text: String = row.get(1)?;
Ok((rowid, text))
})?
.collect();
r.map(FulltextValues)
}
/// Execute the given `sql` query with the given `params` and format the results as a
/// tab-and-newline formatted string suitable for debug printing.
///
/// The query is printed followed by a newline, then the returned columns followed by a newline, and
/// then the data rows and columns. All columns are aligned.
pub fn dump_sql_query(
conn: &rusqlite::Connection,
sql: &str,
params: &[&dyn ToSql],
) -> Result<String> {
let mut stmt: rusqlite::Statement = conn.prepare(sql)?;
let mut tw = TabWriter::new(Vec::new()).padding(2);
writeln!(&mut tw, "{}", sql).unwrap();
for column_name in stmt.column_names() {
write!(&mut tw, "{}\t", column_name).unwrap();
}
writeln!(&mut tw).unwrap();
let r: Result<Vec<_>> = stmt
.query_and_then(params, |row| {
for i in 0..row.as_ref().column_count() {
let value: rusqlite::types::Value = row.get(i)?;
write!(&mut tw, "{:?}\t", value).unwrap();
}
writeln!(&mut tw).unwrap();
Ok(())
})?
.collect();
r?;
let dump = String::from_utf8(tw.into_inner().unwrap()).unwrap();
Ok(dump)
}
// A connection that doesn't try to be clever about possibly sharing its `Schema`. Compare to
// `mentat::Conn`.
pub struct TestConn {
pub sqlite: rusqlite::Connection,
pub partition_map: PartitionMap,
pub schema: Schema,
}
impl TestConn {
fn assert_materialized_views(&self) {
let materialized_ident_map = read_ident_map(&self.sqlite).expect("ident map");
let materialized_attribute_map = read_attribute_map(&self.sqlite).expect("schema map");
let materialized_schema = Schema::from_ident_map_and_attribute_map(
materialized_ident_map,
materialized_attribute_map,
)
.expect("schema");
assert_eq!(materialized_schema, self.schema);
}
pub fn transact<I>(&mut self, transaction: I) -> Result<TxReport>
where
I: Borrow<str>,
{
// Failure to parse the transaction is a coding error, so we unwrap.
let entities = edn::parse::entities(transaction.borrow()).unwrap_or_else(|_| {
panic!("to be able to parse {} into entities", transaction.borrow())
});
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
entities,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn | <I>(
&mut self,
terms: I,
tempid_set: InternSet<TempId>,
) -> Result<TxReport>
where
I: IntoIterator<Item = TermWithTempIds>,
{
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact_terms(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
terms,
tempid_set,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn last_tx_id(&self) -> Entid {
self.partition_map
.get(&":db.part/tx".to_string())
.unwrap()
.next_entid()
- 1
}
pub fn last_transaction(&self) -> Datoms {
transactions_after(&self.sqlite, &self.schema, self.last_tx_id() - 1)
.expect("last_transaction")
.0
.pop()
.unwrap()
}
pub fn transactions(&self) -> Transactions {
transactions_after(&self.sqlite, &self.schema, bootstrap::TX0).expect("transactions")
}
pub fn datoms(&self) -> Datoms {
datoms_after(&self.sqlite, &self.schema, bootstrap::TX0).expect("datoms")
}
pub fn fulltext_values(&self) -> FulltextValues {
fulltext_values(&self.sqlite).expect("fulltext_values")
}
pub fn with_sqlite(mut conn: rusqlite::Connection) -> TestConn {
let db = ensure_current_version(&mut conn).unwrap();
// Does not include :db/txInstant.
let datoms = datoms_after(&conn, &db.schema, 0).unwrap();
assert_eq!(datoms.0.len(), 94);
// Includes :db/txInstant.
let transactions = transactions_after(&conn, &db.schema, 0).unwrap();
assert_eq!(transactions.0.len(), 1);
assert_eq!(transactions.0[0].0.len(), 95);
let mut parts = db.partition_map;
// Add a fake partition to allow tests to do things like
// [:db/add 111 :foo/bar 222]
{
let fake_partition = Partition::new(100, 2000, 1000, true);
parts.insert(":db.part/fake".into(), fake_partition);
}
let test_conn = TestConn {
sqlite: conn,
partition_map: parts,
schema: db.schema,
};
// Verify that we've created the materialized views during bootstrapping.
test_conn.assert_materialized_views();
test_conn
}
pub fn sanitized_partition_map(&mut self) {
self.partition_map.remove(":db.part/fake");
}
}
impl Default for TestConn {
fn default() -> TestConn {
TestConn::with_sqlite(new_connection("").expect("Couldn't open in-memory db"))
}
}
pub struct TempIds(edn::Value);
impl TempIds {
pub fn to_edn(&self) -> edn::Value {
self.0.clone()
}
}
pub fn tempids(report: &TxReport) -> TempIds {
let mut map: BTreeMap<edn::Value, edn::Value> = BTreeMap::default();
for (tempid, &entid) in report.tempids.iter() {
map.insert(edn::Value::Text(tempid.clone()), edn::Value::Integer(entid));
}
TempIds(edn::Value::Map(map))
}
| transact_simple_terms | identifier_name |
debug.rs | // Copyright 2016 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#![allow(dead_code)]
#![allow(unused_macros)]
/// Low-level functions for testing.
// Macro to parse a `Borrow<str>` to an `edn::Value` and assert the given `edn::Value` `matches`
// against it.
//
// This is a macro only to give nice line numbers when tests fail.
#[macro_export]
macro_rules! assert_matches {
( $input: expr, $expected: expr ) => {{
// Failure to parse the expected pattern is a coding error, so we unwrap.
let pattern_value = edn::parse::value($expected.borrow())
.expect(format!("to be able to parse expected {}", $expected).as_str())
.without_spans();
let input_value = $input.to_edn();
assert!(
input_value.matches(&pattern_value),
"Expected value:\n{}\nto match pattern:\n{}\n",
input_value.to_pretty(120).unwrap(),
pattern_value.to_pretty(120).unwrap()
);
}};
}
// Transact $input against the given $conn, expecting success or a `Result<TxReport, String>`.
//
// This unwraps safely and makes asserting errors pleasant.
#[macro_export]
macro_rules! assert_transact {
( $conn: expr, $input: expr, $expected: expr ) => {{
trace!("assert_transact: {}", $input);
let result = $conn.transact($input).map_err(|e| e.to_string());
assert_eq!(result, $expected.map_err(|e| e.to_string()));
}};
( $conn: expr, $input: expr ) => {{
trace!("assert_transact: {}", $input);
let result = $conn.transact($input);
assert!(
result.is_ok(),
"Expected Ok(_), got `{}`",
result.unwrap_err()
);
result.unwrap()
}};
}
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::io::Write;
use itertools::Itertools;
use rusqlite;
use rusqlite::types::ToSql;
use rusqlite::TransactionBehavior;
use tabwriter::TabWriter;
use crate::bootstrap;
use crate::db::*;
use crate::db::{read_attribute_map, read_ident_map};
use crate::entids;
use db_traits::errors::Result;
use edn;
use core_traits::{Entid, TypedValue, ValueType};
use crate::internal_types::TermWithTempIds;
use crate::schema::SchemaBuilding;
use crate::tx::{transact, transact_terms};
use crate::types::*;
use crate::watcher::NullWatcher;
use edn::entities::{EntidOrIdent, TempId};
use edn::InternSet;
use mentat_core::{HasSchema, SQLValueType, TxReport};
/// Represents a *datom* (assertion) in the store.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct Datom {
// TODO: generalize this.
pub e: EntidOrIdent,
pub a: EntidOrIdent,
pub v: edn::Value,
pub tx: i64,
pub added: Option<bool>,
}
/// Represents a set of datoms (assertions) in the store.
///
/// To make comparison easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx)`, where `value_type_tag` is an internal
/// value that is not exposed but is deterministic.
pub struct Datoms(pub Vec<Datom>);
/// Represents an ordered sequence of transactions in the store.
///
/// To make comparison easier, we deterministically order. The ordering is the ascending tuple
/// ordering determined by `(e, a, (value_type_tag, v), tx, added)`, where `value_type_tag` is an
/// internal value that is not exposed but is deterministic, and `added` is ordered such that
/// retracted assertions appear before added assertions.
pub struct Transactions(pub Vec<Datoms>);
/// Represents the fulltext values in the store.
pub struct FulltextValues(pub Vec<(i64, String)>);
impl Datom {
pub fn to_edn(&self) -> edn::Value {
let f = |entid: &EntidOrIdent| -> edn::Value {
match *entid {
EntidOrIdent::Entid(ref y) => edn::Value::Integer(*y),
EntidOrIdent::Ident(ref y) => edn::Value::Keyword(y.clone()),
}
};
let mut v = vec![f(&self.e), f(&self.a), self.v.clone()];
if let Some(added) = self.added {
v.push(edn::Value::Integer(self.tx));
v.push(edn::Value::Boolean(added));
}
edn::Value::Vector(v)
}
}
impl Datoms {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl Transactions {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector((&self.0).iter().map(|x| x.to_edn()).collect())
}
}
impl FulltextValues {
pub fn to_edn(&self) -> edn::Value {
edn::Value::Vector(
(&self.0)
.iter()
.map(|&(x, ref y)| {
edn::Value::Vector(vec![edn::Value::Integer(x), edn::Value::Text(y.clone())])
})
.collect(),
)
}
}
/// Turn TypedValue::Ref into TypedValue::Keyword when it is possible.
trait ToIdent {
fn map_ident(self, schema: &Schema) -> Self;
}
impl ToIdent for TypedValue {
fn map_ident(self, schema: &Schema) -> Self {
if let TypedValue::Ref(e) = self {
schema
.get_ident(e)
.cloned()
.map(|i| i.into())
.unwrap_or(TypedValue::Ref(e))
} else {
self
}
}
}
/// Convert a numeric entid to an ident `Entid` if possible, otherwise a numeric `Entid`.
pub fn to_entid(schema: &Schema, entid: i64) -> EntidOrIdent {
schema
.get_ident(entid)
.map_or(EntidOrIdent::Entid(entid), |ident| {
EntidOrIdent::Ident(ident.clone())
})
}
// /// Convert a symbolic ident to an ident `Entid` if possible, otherwise a numeric `Entid`.
// pub fn to_ident(schema: &Schema, entid: i64) -> Entid {
// schema.get_ident(entid).map_or(Entid::Entid(entid), |ident| Entid::Ident(ident.clone()))
// }
/// Return the set of datoms in the store, ordered by (e, a, v, tx), but not including any datoms of
/// the form [... :db/txInstant ...].
pub fn datoms<S: Borrow<Schema>>(conn: &rusqlite::Connection, schema: &S) -> Result<Datoms> {
datoms_after(conn, schema, bootstrap::TX0 - 1)
}
/// Return the set of datoms in the store with transaction ID strictly greater than the given `tx`,
/// ordered by (e, a, v, tx).
///
/// The datom set returned does not include any datoms of the form [... :db/txInstant ...].
pub fn datoms_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Datoms> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx FROM datoms WHERE tx > ? ORDER BY e ASC, a ASC, value_type_tag ASC, v ASC, tx ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
if a == entids::DB_TX_INSTANT {
return Ok(None);
}
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
Ok(Some(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: None,
}))
})?
.collect();
Ok(Datoms(r?.into_iter().filter_map(|x| x).collect()))
}
/// Return the sequence of transactions in the store with transaction ID strictly greater than the
/// given `tx`, ordered by (tx, e, a, v).
///
/// Each transaction returned includes the [(transaction-tx) :db/txInstant ...] datom.
pub fn transactions_after<S: Borrow<Schema>>(
conn: &rusqlite::Connection,
schema: &S,
tx: i64,
) -> Result<Transactions> {
let borrowed_schema = schema.borrow();
let mut stmt: rusqlite::Statement = conn.prepare("SELECT e, a, v, value_type_tag, tx, added FROM transactions WHERE tx > ? ORDER BY tx ASC, e ASC, a ASC, value_type_tag ASC, v ASC, added ASC")?;
let r: Result<Vec<_>> = stmt
.query_and_then(&[&tx], |row| {
let e: i64 = row.get(0)?;
let a: i64 = row.get(1)?;
let v: rusqlite::types::Value = row.get(2)?;
let value_type_tag: i32 = row.get(3)?;
let attribute = borrowed_schema.require_attribute_for_entid(a)?;
let value_type_tag = if !attribute.fulltext {
value_type_tag
} else {
ValueType::Long.value_type_tag()
};
let typed_value =
TypedValue::from_sql_value_pair(v, value_type_tag)?.map_ident(borrowed_schema);
let (value, _) = typed_value.to_edn_value_pair();
let tx: i64 = row.get(4)?;
let added: bool = row.get(5)?;
Ok(Datom {
e: EntidOrIdent::Entid(e),
a: to_entid(borrowed_schema, a),
v: value,
tx,
added: Some(added),
})
})?
.collect();
// Group by tx.
let r: Vec<Datoms> = r?
.into_iter()
.group_by(|x| x.tx)
.into_iter()
.map(|(_key, group)| Datoms(group.collect()))
.collect();
Ok(Transactions(r))
}
/// Return the set of fulltext values in the store, ordered by rowid.
pub fn fulltext_values(conn: &rusqlite::Connection) -> Result<FulltextValues> {
let mut stmt: rusqlite::Statement =
conn.prepare("SELECT rowid, text FROM fulltext_values ORDER BY rowid")?;
let r: Result<Vec<_>> = stmt
.query_and_then([], |row| {
let rowid: i64 = row.get(0)?;
let text: String = row.get(1)?;
Ok((rowid, text))
})?
.collect();
r.map(FulltextValues)
}
/// Execute the given `sql` query with the given `params` and format the results as a
/// tab-and-newline formatted string suitable for debug printing.
///
/// The query is printed followed by a newline, then the returned columns followed by a newline, and
/// then the data rows and columns. All columns are aligned.
pub fn dump_sql_query(
conn: &rusqlite::Connection, | params: &[&dyn ToSql],
) -> Result<String> {
let mut stmt: rusqlite::Statement = conn.prepare(sql)?;
let mut tw = TabWriter::new(Vec::new()).padding(2);
writeln!(&mut tw, "{}", sql).unwrap();
for column_name in stmt.column_names() {
write!(&mut tw, "{}\t", column_name).unwrap();
}
writeln!(&mut tw).unwrap();
let r: Result<Vec<_>> = stmt
.query_and_then(params, |row| {
for i in 0..row.as_ref().column_count() {
let value: rusqlite::types::Value = row.get(i)?;
write!(&mut tw, "{:?}\t", value).unwrap();
}
writeln!(&mut tw).unwrap();
Ok(())
})?
.collect();
r?;
let dump = String::from_utf8(tw.into_inner().unwrap()).unwrap();
Ok(dump)
}
// A connection that doesn't try to be clever about possibly sharing its `Schema`. Compare to
// `mentat::Conn`.
pub struct TestConn {
pub sqlite: rusqlite::Connection,
pub partition_map: PartitionMap,
pub schema: Schema,
}
impl TestConn {
fn assert_materialized_views(&self) {
let materialized_ident_map = read_ident_map(&self.sqlite).expect("ident map");
let materialized_attribute_map = read_attribute_map(&self.sqlite).expect("schema map");
let materialized_schema = Schema::from_ident_map_and_attribute_map(
materialized_ident_map,
materialized_attribute_map,
)
.expect("schema");
assert_eq!(materialized_schema, self.schema);
}
pub fn transact<I>(&mut self, transaction: I) -> Result<TxReport>
where
I: Borrow<str>,
{
// Failure to parse the transaction is a coding error, so we unwrap.
let entities = edn::parse::entities(transaction.borrow()).unwrap_or_else(|_| {
panic!("to be able to parse {} into entities", transaction.borrow())
});
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
entities,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn transact_simple_terms<I>(
&mut self,
terms: I,
tempid_set: InternSet<TempId>,
) -> Result<TxReport>
where
I: IntoIterator<Item = TermWithTempIds>,
{
let details = {
// The block scopes the borrow of self.sqlite.
// We're about to write, so go straight ahead and get an IMMEDIATE transaction.
let tx = self
.sqlite
.transaction_with_behavior(TransactionBehavior::Immediate)?;
// Applying the transaction can fail, so we don't unwrap.
let details = transact_terms(
&tx,
self.partition_map.clone(),
&self.schema,
&self.schema,
NullWatcher(),
terms,
tempid_set,
)?;
tx.commit()?;
details
};
let (report, next_partition_map, next_schema, _watcher) = details;
self.partition_map = next_partition_map;
if let Some(next_schema) = next_schema {
self.schema = next_schema;
}
// Verify that we've updated the materialized views during transacting.
self.assert_materialized_views();
Ok(report)
}
pub fn last_tx_id(&self) -> Entid {
self.partition_map
.get(&":db.part/tx".to_string())
.unwrap()
.next_entid()
- 1
}
pub fn last_transaction(&self) -> Datoms {
transactions_after(&self.sqlite, &self.schema, self.last_tx_id() - 1)
.expect("last_transaction")
.0
.pop()
.unwrap()
}
pub fn transactions(&self) -> Transactions {
transactions_after(&self.sqlite, &self.schema, bootstrap::TX0).expect("transactions")
}
pub fn datoms(&self) -> Datoms {
datoms_after(&self.sqlite, &self.schema, bootstrap::TX0).expect("datoms")
}
pub fn fulltext_values(&self) -> FulltextValues {
fulltext_values(&self.sqlite).expect("fulltext_values")
}
pub fn with_sqlite(mut conn: rusqlite::Connection) -> TestConn {
let db = ensure_current_version(&mut conn).unwrap();
// Does not include :db/txInstant.
let datoms = datoms_after(&conn, &db.schema, 0).unwrap();
assert_eq!(datoms.0.len(), 94);
// Includes :db/txInstant.
let transactions = transactions_after(&conn, &db.schema, 0).unwrap();
assert_eq!(transactions.0.len(), 1);
assert_eq!(transactions.0[0].0.len(), 95);
let mut parts = db.partition_map;
// Add a fake partition to allow tests to do things like
// [:db/add 111 :foo/bar 222]
{
let fake_partition = Partition::new(100, 2000, 1000, true);
parts.insert(":db.part/fake".into(), fake_partition);
}
let test_conn = TestConn {
sqlite: conn,
partition_map: parts,
schema: db.schema,
};
// Verify that we've created the materialized views during bootstrapping.
test_conn.assert_materialized_views();
test_conn
}
pub fn sanitized_partition_map(&mut self) {
self.partition_map.remove(":db.part/fake");
}
}
impl Default for TestConn {
fn default() -> TestConn {
TestConn::with_sqlite(new_connection("").expect("Couldn't open in-memory db"))
}
}
pub struct TempIds(edn::Value);
impl TempIds {
pub fn to_edn(&self) -> edn::Value {
self.0.clone()
}
}
pub fn tempids(report: &TxReport) -> TempIds {
let mut map: BTreeMap<edn::Value, edn::Value> = BTreeMap::default();
for (tempid, &entid) in report.tempids.iter() {
map.insert(edn::Value::Text(tempid.clone()), edn::Value::Integer(entid));
}
TempIds(edn::Value::Map(map))
} | sql: &str, | random_line_split |
sub_files.py | #!/usr/bin/env python
'''Take a CSV with file metadata, POST new file objects to the ENCODE DCC, upload files to the ENCODE cloud bucket'''
import os, sys, logging, urlparse, requests, csv, StringIO, re, copy, json, subprocess, hashlib, tempfile
logger = logging.getLogger(__name__)
EPILOG = '''Notes:
Examples:
%(prog)s
'''
CSV_ARGS = {
'delimiter': ',',
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'dialect': 'excel'
}
GET_HEADERS = {'accept': 'application/json'}
POST_HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}
def get_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help='CSV file metadata to POST', nargs='?', type=argparse.FileType('rU'), default=sys.stdin)
parser.add_argument('--outfile', help='CSV output report', type=argparse.FileType(mode='wb',bufsize=0), default=sys.stdout)
parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
parser.add_argument('--server', help="The server to POST to.", default=os.getenv('ENCODE_SERVER',None))
parser.add_argument('--authid', help="The authorization key ID for the server.", default=os.getenv('ENCODE_AUTHID',None))
parser.add_argument('--authpw', help="The authorization key for the server.", default=os.getenv('ENCODE_AUTHPW',None))
parser.add_argument('--dryrun', help="Don't POST to the database, just validate input.", default=False, action='store_true')
parser.add_argument('--encvaldata', help="Directory in which https://github.com/ENCODE-DCC/encValData.git is cloned.", default=os.path.expanduser("~/encValData/"))
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
else:  # use the default logging level
logging.basicConfig(format='%(levelname)s:%(message)s')
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if not args.server:
logger.error('Server name must be specified on the command line or in environment ENCODE_SERVER')
sys.exit(1)
if not args.authid or not args.authpw:
logger.error('Authorization keypair must be specified on the command line or in environment ENCODE_AUTHID, ENCODE_AUTHPW')
sys.exit(1)
if not os.path.isdir(args.encvaldata):
logger.error('No ENCODE validation data. git clone https://github.com/ENCODE-DCC/encValData.git')
sys.exit(1)
return args
def md5(path):
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
return md5sum.hexdigest()
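# Illustrative usage (added for clarity; the path and digest are hypothetical):
# >>> md5('/path/to/submission.fastq.gz')
# 'd41d8cd98f00b204e9800998ecf8427e'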
# This does not depend on hashlib
# if 'md5_command' not in globals():
# global md5_command
# if subprocess.check_output('which md5', shell=True):
# md5_command = 'md5 -q'
# elif subprocess.check_output('which md5sum', shell=True):
# md5_command = 'md5sum'
# else:
# md5_command = ''
# if not md5_command:
# logger.error("No MD5 command found (tried md5 and md5sum)")
# return None
# else:
# try:
# md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
# except:
# return None
# else:
# return md5_output.partition(' ')[0].rstrip()
def test_encode_keys(server,keypair):
test_URI = "ENCBS000AAA"
url = urlparse.urljoin(server,test_URI)
r = requests.get(url, auth=keypair, headers=GET_HEADERS)
try:
r.raise_for_status()
except:
logger.debug('test_encode_keys got response %s' %(r.text))
return False
else:
return True
def input_csv(fh):
csv_args = CSV_ARGS
input_fieldnames = csv.reader(fh, **csv_args).next()
return csv.DictReader(fh, fieldnames=input_fieldnames, **csv_args)
def output_csv(fh,fieldnames):
csv_args = CSV_ARGS
additional_fields = ['accession','aws_return']
output_fieldnames = [fn for fn in fieldnames if fn] + additional_fields
output = csv.DictWriter(fh, fieldnames=output_fieldnames, **csv_args)
return output
def init_csvs(in_fh,out_fh):
input_reader = input_csv(in_fh)
output_writer = output_csv(out_fh,input_reader.fieldnames)
return input_reader, output_writer
def validate_file(f_obj, encValData, assembly=None, as_path=None):
path = f_obj.get('submitted_file_name')
file_format = f_obj.get('file_format')
file_format_type = f_obj.get('file_format_type')
output_type = f_obj.get('output_type')
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
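	# gzip-compressed files begin with the two-byte magic number 0x1f 0x8b; reading just those bytes is enough to detect compression.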
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if file_format in gzip_types:
if not is_gzipped:
logger.warning('%s: Expect %s format to be gzipped' %(path,file_format))
else:
if is_gzipped:
logger.warning('%s: Expect %s format to be un-gzipped' %(path,file_format))
if assembly:
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
else:
chromInfo = None
if as_path:
as_file = '-as=%s' %(as_path)
else:
as_file = None
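	# Map each (file_format, file_format_type) pair to validateFiles arguments; None means no validation rules exist for that format.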
validate_map = {
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bam', None): ['-type=bam', chromInfo],
('bigWig', None): ['-type=bigWig', chromInfo],
('bed', 'bed3'): ['-type=bed3', chromInfo],
('bigBed', 'bed3'): ['-type=bed3', chromInfo],
('bed', 'bed3+'): ['-type=bed3+', chromInfo],
('bigBed', 'bed3+'): ['-type=bed3+', chromInfo],
('bed', 'bed6'): ['-type=bed6+', chromInfo],
('bigBed', 'bed6'): ['-type=bigBed6+', chromInfo],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
('bedpe', 'mango'): ['-type=bed3+', chromInfo],
('gtf', None): None,
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((file_format, file_format_type))
if validate_args is None:
logger.warning('No rules to validate file_format %s and file_format_type %s' %(file_format, file_format_type))
return False
if (file_format, file_format_type) in [('bed', 'bed3'), ('bed', 'bed3+')] and as_file: #TODO: Update file schema and change to bed3+
		validate_args = ['-type=bed3+', chromInfo] #TODO: Update file schema. This is to force bed3+ for validateFiles but pass bed3 to file_format_type
		validate_args.append(as_file)
tokens = ['validateFiles'] + validate_args + [path]
logger.debug('Running: %s' %(tokens))
try:
subprocess.check_output(tokens)
except subprocess.CalledProcessError as e:
logger.error("validateFiles returned %s" %(e.output))
return False
else:
logger.debug("%s: validateFiles passed" %(path))
return True
def post_file(file_metadata, server, keypair, dryrun=False):
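	# Fill in the md5sum if the metadata does not already carry one, then POST to /files/; with dryrun=True nothing is sent to the server.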
local_path = file_metadata.get('submitted_file_name')
if not file_metadata.get('md5sum'):
file_metadata['md5sum'] = md5(local_path)
try:
logger.debug("POST JSON: %s" %(json.dumps(file_metadata)))
except:
pass
if dryrun:
file_obj = copy.copy(file_metadata)
file_obj.update({'accession':None})
return file_obj
else:
url = urlparse.urljoin(server,'/files/')
r = requests.post(url, auth=keypair, headers=POST_HEADERS, data=json.dumps(file_metadata))
try:
r.raise_for_status()
except:
logger.warning('POST failed: %s %s' %(r.status_code, r.reason))
logger.warning(r.text)
return None
else:
return r.json()['@graph'][0]
def upload_file(file_obj, dryrun=False):
if dryrun:
return None
else:
creds = file_obj['upload_credentials']
logger.debug('AWS creds: %s' %(creds))
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
path = file_obj.get('submitted_file_name')
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
logger.error("AWS upload failed with exit code %d" %(e.returncode))
return e.returncode
else:
return 0
def get_asfile(uri_json, server, keypair):
try:
uris = json.loads(uri_json)
except:
logger.error("Could not parse as JSON: %s" %(uri_json))
return None
for uri in uris:
url = server + '/' + uri
r = requests.get(url, headers=GET_HEADERS, auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to get ENCODE object %s" %(uri))
return None
document_obj = r.json()
r = requests.get(urlparse.urljoin(server, document_obj['uuid'] + '/' + document_obj['attachment']['href']), auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to download ENCODE document %s" %(uri))
return None
f = tempfile.NamedTemporaryFile(delete=False)
f.write(r.text)
return f
def process_row(row):
json_payload = {}
for key,value in row.iteritems():
if not key:
continue
try:
json_payload.update({key:json.loads(value)})
except:
try:
json_payload.update({key:json.loads('"%s"' %(value))})
except:
				logger.warning('Could not convert field %s value %s to JSON' %(key,value))
				return None
return json_payload
def main():
args = get_args()
server = args.server
keypair = (args.authid, args.authpw)
if not test_encode_keys(server, keypair):
logger.error("Invalid ENCODE server or keys: server=%s authid=%s authpw=%s" %(args.server,args.authid,args.authpw))
sys.exit(1)
try:
subprocess.check_output('which validateFiles', shell=True)
except:
logger.error("validateFiles is not in path. See http://hgdownload.cse.ucsc.edu/admin/exe/")
sys.exit(1)
input_csv, output_csv = init_csvs(args.infile, args.outfile)
output_csv.writeheader()
for n,row in enumerate(input_csv,start=2): #row 1 is the header
as_file = get_asfile(row.get('file_format_specifications'), server, keypair)
if as_file:
as_file.close() #validateFiles needs a closed file for -as, otherwise it gives a return code of -11
validated = validate_file(row, args.encvaldata, row.get('assembly'), as_file.name)
os.unlink(as_file.name)
else:
validated = validate_file(row, args.encvaldata, row.get('assembly'))
if not validated:
logger.warning('Skipping row %d: file %s failed validation' %(n,row['submitted_file_name']))
continue
json_payload = process_row(row)
if not json_payload:
logger.warning('Skipping row %d: invalid field format for JSON' %(n))
continue
file_object = post_file(json_payload, server, keypair, args.dryrun)
if not file_object:
logger.warning('Skipping row %d: POST file object failed' %(n))
continue
aws_return_code = upload_file(file_object, args.dryrun)
if aws_return_code:
			logger.warning('Row %d: Non-zero AWS upload return code %d' %(n,aws_return_code))
output_row = {}
for key in output_csv.fieldnames:
output_row.update({key:file_object.get(key)})
output_row.update({'aws_return':aws_return_code})
output_csv.writerow(output_row)
if __name__ == '__main__':
| main() | conditional_block |
|
sub_files.py | #!/usr/bin/env python
'''Take a CSV with file metadata, POST new file objects to the ENCODE DCC, upload files to the ENCODE cloud bucket'''
import os, sys, logging, urlparse, requests, csv, StringIO, re, copy, json, subprocess, hashlib, tempfile
logger = logging.getLogger(__name__)
EPILOG = '''Notes:
Examples:
%(prog)s
'''
CSV_ARGS = {
'delimiter': ',',
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'dialect': 'excel'
}
GET_HEADERS = {'accept': 'application/json'}
POST_HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}
def | ():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help='CSV file metadata to POST', nargs='?', type=argparse.FileType('rU'), default=sys.stdin)
parser.add_argument('--outfile', help='CSV output report', type=argparse.FileType(mode='wb',bufsize=0), default=sys.stdout)
parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
parser.add_argument('--server', help="The server to POST to.", default=os.getenv('ENCODE_SERVER',None))
parser.add_argument('--authid', help="The authorization key ID for the server.", default=os.getenv('ENCODE_AUTHID',None))
parser.add_argument('--authpw', help="The authorization key for the server.", default=os.getenv('ENCODE_AUTHPW',None))
parser.add_argument('--dryrun', help="Don't POST to the database, just validate input.", default=False, action='store_true')
parser.add_argument('--encvaldata', help="Directory in which https://github.com/ENCODE-DCC/encValData.git is cloned.", default=os.path.expanduser("~/encValData/"))
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
	else: #use the default logging level
logging.basicConfig(format='%(levelname)s:%(message)s')
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if not args.server:
logger.error('Server name must be specified on the command line or in environment ENCODE_SERVER')
sys.exit(1)
if not args.authid or not args.authpw:
logger.error('Authorization keypair must be specified on the command line or in environment ENCODE_AUTHID, ENCODE_AUTHPW')
sys.exit(1)
if not os.path.isdir(args.encvaldata):
logger.error('No ENCODE validation data. git clone https://github.com/ENCODE-DCC/encValData.git')
sys.exit(1)
return args
def md5(path):
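	# Hash the file in 1 MiB chunks so large submissions can be checksummed without reading them fully into memory.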
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
return md5sum.hexdigest()
# This does not depend on hashlib
# if 'md5_command' not in globals():
# global md5_command
# if subprocess.check_output('which md5', shell=True):
# md5_command = 'md5 -q'
# elif subprocess.check_output('which md5sum', shell=True):
# md5_command = 'md5sum'
# else:
# md5_command = ''
# if not md5_command:
# logger.error("No MD5 command found (tried md5 and md5sum)")
# return None
# else:
# try:
# md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
# except:
# return None
# else:
# return md5_output.partition(' ')[0].rstrip()
def test_encode_keys(server,keypair):
test_URI = "ENCBS000AAA"
url = urlparse.urljoin(server,test_URI)
r = requests.get(url, auth=keypair, headers=GET_HEADERS)
try:
r.raise_for_status()
except:
logger.debug('test_encode_keys got response %s' %(r.text))
return False
else:
return True
def input_csv(fh):
csv_args = CSV_ARGS
input_fieldnames = csv.reader(fh, **csv_args).next()
return csv.DictReader(fh, fieldnames=input_fieldnames, **csv_args)
def output_csv(fh,fieldnames):
csv_args = CSV_ARGS
additional_fields = ['accession','aws_return']
output_fieldnames = [fn for fn in fieldnames if fn] + additional_fields
output = csv.DictWriter(fh, fieldnames=output_fieldnames, **csv_args)
return output
def init_csvs(in_fh,out_fh):
input_reader = input_csv(in_fh)
output_writer = output_csv(out_fh,input_reader.fieldnames)
return input_reader, output_writer
def validate_file(f_obj, encValData, assembly=None, as_path=None):
path = f_obj.get('submitted_file_name')
file_format = f_obj.get('file_format')
file_format_type = f_obj.get('file_format_type')
output_type = f_obj.get('output_type')
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if file_format in gzip_types:
if not is_gzipped:
logger.warning('%s: Expect %s format to be gzipped' %(path,file_format))
else:
if is_gzipped:
logger.warning('%s: Expect %s format to be un-gzipped' %(path,file_format))
if assembly:
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
else:
chromInfo = None
if as_path:
as_file = '-as=%s' %(as_path)
else:
as_file = None
validate_map = {
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bam', None): ['-type=bam', chromInfo],
('bigWig', None): ['-type=bigWig', chromInfo],
('bed', 'bed3'): ['-type=bed3', chromInfo],
('bigBed', 'bed3'): ['-type=bed3', chromInfo],
('bed', 'bed3+'): ['-type=bed3+', chromInfo],
('bigBed', 'bed3+'): ['-type=bed3+', chromInfo],
('bed', 'bed6'): ['-type=bed6+', chromInfo],
('bigBed', 'bed6'): ['-type=bigBed6+', chromInfo],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
('bedpe', 'mango'): ['-type=bed3+', chromInfo],
('gtf', None): None,
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((file_format, file_format_type))
if validate_args is None:
logger.warning('No rules to validate file_format %s and file_format_type %s' %(file_format, file_format_type))
return False
if (file_format, file_format_type) in [('bed', 'bed3'), ('bed', 'bed3+')] and as_file: #TODO: Update file schema and change to bed3+
		validate_args = ['-type=bed3+', chromInfo] #TODO: Update file schema. This is to force bed3+ for validateFiles but pass bed3 to file_format_type
		validate_args.append(as_file)
tokens = ['validateFiles'] + validate_args + [path]
logger.debug('Running: %s' %(tokens))
try:
subprocess.check_output(tokens)
except subprocess.CalledProcessError as e:
logger.error("validateFiles returned %s" %(e.output))
return False
else:
logger.debug("%s: validateFiles passed" %(path))
return True
def post_file(file_metadata, server, keypair, dryrun=False):
local_path = file_metadata.get('submitted_file_name')
if not file_metadata.get('md5sum'):
file_metadata['md5sum'] = md5(local_path)
try:
logger.debug("POST JSON: %s" %(json.dumps(file_metadata)))
except:
pass
if dryrun:
file_obj = copy.copy(file_metadata)
file_obj.update({'accession':None})
return file_obj
else:
url = urlparse.urljoin(server,'/files/')
r = requests.post(url, auth=keypair, headers=POST_HEADERS, data=json.dumps(file_metadata))
try:
r.raise_for_status()
except:
logger.warning('POST failed: %s %s' %(r.status_code, r.reason))
logger.warning(r.text)
return None
else:
return r.json()['@graph'][0]
def upload_file(file_obj, dryrun=False):
if dryrun:
return None
else:
creds = file_obj['upload_credentials']
logger.debug('AWS creds: %s' %(creds))
env = os.environ.copy()
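		# The temporary S3 credentials returned with the file object are handed to the aws CLI through environment variables.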
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
path = file_obj.get('submitted_file_name')
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
logger.error("AWS upload failed with exit code %d" %(e.returncode))
return e.returncode
else:
return 0
def get_asfile(uri_json, server, keypair):
try:
uris = json.loads(uri_json)
except:
logger.error("Could not parse as JSON: %s" %(uri_json))
return None
for uri in uris:
url = server + '/' + uri
r = requests.get(url, headers=GET_HEADERS, auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to get ENCODE object %s" %(uri))
return None
document_obj = r.json()
r = requests.get(urlparse.urljoin(server, document_obj['uuid'] + '/' + document_obj['attachment']['href']), auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to download ENCODE document %s" %(uri))
return None
f = tempfile.NamedTemporaryFile(delete=False)
f.write(r.text)
return f
def process_row(row):
json_payload = {}
for key,value in row.iteritems():
if not key:
continue
try:
json_payload.update({key:json.loads(value)})
except:
try:
json_payload.update({key:json.loads('"%s"' %(value))})
except:
				logger.warning('Could not convert field %s value %s to JSON' %(key,value))
				return None
return json_payload
def main():
args = get_args()
server = args.server
keypair = (args.authid, args.authpw)
if not test_encode_keys(server, keypair):
logger.error("Invalid ENCODE server or keys: server=%s authid=%s authpw=%s" %(args.server,args.authid,args.authpw))
sys.exit(1)
try:
subprocess.check_output('which validateFiles', shell=True)
except:
logger.error("validateFiles is not in path. See http://hgdownload.cse.ucsc.edu/admin/exe/")
sys.exit(1)
input_csv, output_csv = init_csvs(args.infile, args.outfile)
output_csv.writeheader()
for n,row in enumerate(input_csv,start=2): #row 1 is the header
as_file = get_asfile(row.get('file_format_specifications'), server, keypair)
if as_file:
as_file.close() #validateFiles needs a closed file for -as, otherwise it gives a return code of -11
validated = validate_file(row, args.encvaldata, row.get('assembly'), as_file.name)
os.unlink(as_file.name)
else:
validated = validate_file(row, args.encvaldata, row.get('assembly'))
if not validated:
logger.warning('Skipping row %d: file %s failed validation' %(n,row['submitted_file_name']))
continue
json_payload = process_row(row)
if not json_payload:
logger.warning('Skipping row %d: invalid field format for JSON' %(n))
continue
file_object = post_file(json_payload, server, keypair, args.dryrun)
if not file_object:
logger.warning('Skipping row %d: POST file object failed' %(n))
continue
aws_return_code = upload_file(file_object, args.dryrun)
if aws_return_code:
			logger.warning('Row %d: Non-zero AWS upload return code %d' %(n,aws_return_code))
output_row = {}
for key in output_csv.fieldnames:
output_row.update({key:file_object.get(key)})
output_row.update({'aws_return':aws_return_code})
output_csv.writerow(output_row)
if __name__ == '__main__':
main()
| get_args | identifier_name |
sub_files.py | #!/usr/bin/env python
'''Take a CSV with file metadata, POST new file objects to the ENCODE DCC, upload files to the ENCODE cloud bucket'''
import os, sys, logging, urlparse, requests, csv, StringIO, re, copy, json, subprocess, hashlib, tempfile
logger = logging.getLogger(__name__)
EPILOG = '''Notes:
Examples:
%(prog)s
'''
CSV_ARGS = {
'delimiter': ',',
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'dialect': 'excel'
}
GET_HEADERS = {'accept': 'application/json'}
POST_HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}
def get_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help='CSV file metadata to POST', nargs='?', type=argparse.FileType('rU'), default=sys.stdin)
parser.add_argument('--outfile', help='CSV output report', type=argparse.FileType(mode='wb',bufsize=0), default=sys.stdout)
parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
parser.add_argument('--server', help="The server to POST to.", default=os.getenv('ENCODE_SERVER',None))
parser.add_argument('--authid', help="The authorization key ID for the server.", default=os.getenv('ENCODE_AUTHID',None))
parser.add_argument('--authpw', help="The authorization key for the server.", default=os.getenv('ENCODE_AUTHPW',None))
parser.add_argument('--dryrun', help="Don't POST to the database, just validate input.", default=False, action='store_true')
parser.add_argument('--encvaldata', help="Directory in which https://github.com/ENCODE-DCC/encValData.git is cloned.", default=os.path.expanduser("~/encValData/"))
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
	else: #use the default logging level
logging.basicConfig(format='%(levelname)s:%(message)s')
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if not args.server:
logger.error('Server name must be specified on the command line or in environment ENCODE_SERVER')
sys.exit(1)
if not args.authid or not args.authpw:
logger.error('Authorization keypair must be specified on the command line or in environment ENCODE_AUTHID, ENCODE_AUTHPW')
sys.exit(1)
if not os.path.isdir(args.encvaldata):
logger.error('No ENCODE validation data. git clone https://github.com/ENCODE-DCC/encValData.git')
sys.exit(1)
return args
def md5(path):
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
return md5sum.hexdigest()
# This does not depend on hashlib
# if 'md5_command' not in globals():
# global md5_command
# if subprocess.check_output('which md5', shell=True):
# md5_command = 'md5 -q'
# elif subprocess.check_output('which md5sum', shell=True):
# md5_command = 'md5sum'
# else:
# md5_command = ''
# if not md5_command:
# logger.error("No MD5 command found (tried md5 and md5sum)")
# return None
# else:
# try:
# md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
# except:
# return None
# else:
# return md5_output.partition(' ')[0].rstrip()
def test_encode_keys(server,keypair):
|
def input_csv(fh):
csv_args = CSV_ARGS
input_fieldnames = csv.reader(fh, **csv_args).next()
return csv.DictReader(fh, fieldnames=input_fieldnames, **csv_args)
def output_csv(fh,fieldnames):
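	# The output report echoes the input columns and appends the accession assigned by the server and the aws upload return code.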
csv_args = CSV_ARGS
additional_fields = ['accession','aws_return']
output_fieldnames = [fn for fn in fieldnames if fn] + additional_fields
output = csv.DictWriter(fh, fieldnames=output_fieldnames, **csv_args)
return output
def init_csvs(in_fh,out_fh):
input_reader = input_csv(in_fh)
output_writer = output_csv(out_fh,input_reader.fieldnames)
return input_reader, output_writer
def validate_file(f_obj, encValData, assembly=None, as_path=None):
path = f_obj.get('submitted_file_name')
file_format = f_obj.get('file_format')
file_format_type = f_obj.get('file_format_type')
output_type = f_obj.get('output_type')
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if file_format in gzip_types:
if not is_gzipped:
logger.warning('%s: Expect %s format to be gzipped' %(path,file_format))
else:
if is_gzipped:
logger.warning('%s: Expect %s format to be un-gzipped' %(path,file_format))
if assembly:
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
else:
chromInfo = None
if as_path:
as_file = '-as=%s' %(as_path)
else:
as_file = None
validate_map = {
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bam', None): ['-type=bam', chromInfo],
('bigWig', None): ['-type=bigWig', chromInfo],
('bed', 'bed3'): ['-type=bed3', chromInfo],
('bigBed', 'bed3'): ['-type=bed3', chromInfo],
('bed', 'bed3+'): ['-type=bed3+', chromInfo],
('bigBed', 'bed3+'): ['-type=bed3+', chromInfo],
('bed', 'bed6'): ['-type=bed6+', chromInfo],
('bigBed', 'bed6'): ['-type=bigBed6+', chromInfo],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
('bedpe', 'mango'): ['-type=bed3+', chromInfo],
('gtf', None): None,
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((file_format, file_format_type))
if validate_args is None:
logger.warning('No rules to validate file_format %s and file_format_type %s' %(file_format, file_format_type))
return False
if (file_format, file_format_type) in [('bed', 'bed3'), ('bed', 'bed3+')] and as_file: #TODO: Update file schema and change to bed3+
		validate_args = ['-type=bed3+', chromInfo] #TODO: Update file schema. This is to force bed3+ for validateFiles but pass bed3 to file_format_type
		validate_args.append(as_file)
tokens = ['validateFiles'] + validate_args + [path]
logger.debug('Running: %s' %(tokens))
try:
subprocess.check_output(tokens)
except subprocess.CalledProcessError as e:
logger.error("validateFiles returned %s" %(e.output))
return False
else:
logger.debug("%s: validateFiles passed" %(path))
return True
def post_file(file_metadata, server, keypair, dryrun=False):
local_path = file_metadata.get('submitted_file_name')
if not file_metadata.get('md5sum'):
file_metadata['md5sum'] = md5(local_path)
try:
logger.debug("POST JSON: %s" %(json.dumps(file_metadata)))
except:
pass
if dryrun:
file_obj = copy.copy(file_metadata)
file_obj.update({'accession':None})
return file_obj
else:
url = urlparse.urljoin(server,'/files/')
r = requests.post(url, auth=keypair, headers=POST_HEADERS, data=json.dumps(file_metadata))
try:
r.raise_for_status()
except:
logger.warning('POST failed: %s %s' %(r.status_code, r.reason))
logger.warning(r.text)
return None
else:
return r.json()['@graph'][0]
def upload_file(file_obj, dryrun=False):
if dryrun:
return None
else:
creds = file_obj['upload_credentials']
logger.debug('AWS creds: %s' %(creds))
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
path = file_obj.get('submitted_file_name')
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
logger.error("AWS upload failed with exit code %d" %(e.returncode))
return e.returncode
else:
return 0
def get_asfile(uri_json, server, keypair):
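	# Resolve the document URIs listed under file_format_specifications and download the attachment to a temporary file for use with validateFiles -as.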
try:
uris = json.loads(uri_json)
except:
logger.error("Could not parse as JSON: %s" %(uri_json))
return None
for uri in uris:
url = server + '/' + uri
r = requests.get(url, headers=GET_HEADERS, auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to get ENCODE object %s" %(uri))
return None
document_obj = r.json()
r = requests.get(urlparse.urljoin(server, document_obj['uuid'] + '/' + document_obj['attachment']['href']), auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to download ENCODE document %s" %(uri))
return None
f = tempfile.NamedTemporaryFile(delete=False)
f.write(r.text)
return f
def process_row(row):
json_payload = {}
for key,value in row.iteritems():
if not key:
continue
try:
json_payload.update({key:json.loads(value)})
except:
try:
json_payload.update({key:json.loads('"%s"' %(value))})
except:
				logger.warning('Could not convert field %s value %s to JSON' %(key,value))
				return None
return json_payload
def main():
args = get_args()
server = args.server
keypair = (args.authid, args.authpw)
if not test_encode_keys(server, keypair):
logger.error("Invalid ENCODE server or keys: server=%s authid=%s authpw=%s" %(args.server,args.authid,args.authpw))
sys.exit(1)
try:
subprocess.check_output('which validateFiles', shell=True)
except:
logger.error("validateFiles is not in path. See http://hgdownload.cse.ucsc.edu/admin/exe/")
sys.exit(1)
input_csv, output_csv = init_csvs(args.infile, args.outfile)
output_csv.writeheader()
for n,row in enumerate(input_csv,start=2): #row 1 is the header
as_file = get_asfile(row.get('file_format_specifications'), server, keypair)
if as_file:
as_file.close() #validateFiles needs a closed file for -as, otherwise it gives a return code of -11
validated = validate_file(row, args.encvaldata, row.get('assembly'), as_file.name)
os.unlink(as_file.name)
else:
validated = validate_file(row, args.encvaldata, row.get('assembly'))
if not validated:
logger.warning('Skipping row %d: file %s failed validation' %(n,row['submitted_file_name']))
continue
json_payload = process_row(row)
if not json_payload:
logger.warning('Skipping row %d: invalid field format for JSON' %(n))
continue
file_object = post_file(json_payload, server, keypair, args.dryrun)
if not file_object:
logger.warning('Skipping row %d: POST file object failed' %(n))
continue
aws_return_code = upload_file(file_object, args.dryrun)
if aws_return_code:
			logger.warning('Row %d: Non-zero AWS upload return code %d' %(n,aws_return_code))
output_row = {}
for key in output_csv.fieldnames:
output_row.update({key:file_object.get(key)})
output_row.update({'aws_return':aws_return_code})
output_csv.writerow(output_row)
if __name__ == '__main__':
main()
| test_URI = "ENCBS000AAA"
url = urlparse.urljoin(server,test_URI)
r = requests.get(url, auth=keypair, headers=GET_HEADERS)
try:
r.raise_for_status()
except:
logger.debug('test_encode_keys got response %s' %(r.text))
return False
else:
return True | identifier_body |
sub_files.py | #!/usr/bin/env python
'''Take a CSV with file metadata, POST new file objects to the ENCODE DCC, upload files to the ENCODE cloud bucket'''
import os, sys, logging, urlparse, requests, csv, StringIO, re, copy, json, subprocess, hashlib, tempfile
logger = logging.getLogger(__name__)
EPILOG = '''Notes:
Examples:
%(prog)s
'''
CSV_ARGS = {
'delimiter': ',',
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'dialect': 'excel'
}
GET_HEADERS = {'accept': 'application/json'}
POST_HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}
def get_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help='CSV file metadata to POST', nargs='?', type=argparse.FileType('rU'), default=sys.stdin)
parser.add_argument('--outfile', help='CSV output report', type=argparse.FileType(mode='wb',bufsize=0), default=sys.stdout)
parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
parser.add_argument('--server', help="The server to POST to.", default=os.getenv('ENCODE_SERVER',None))
parser.add_argument('--authid', help="The authorization key ID for the server.", default=os.getenv('ENCODE_AUTHID',None))
parser.add_argument('--authpw', help="The authorization key for the server.", default=os.getenv('ENCODE_AUTHPW',None))
parser.add_argument('--dryrun', help="Don't POST to the database, just validate input.", default=False, action='store_true')
parser.add_argument('--encvaldata', help="Directory in which https://github.com/ENCODE-DCC/encValData.git is cloned.", default=os.path.expanduser("~/encValData/"))
args = parser.parse_args()
if args.debug:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
	else: #use the default logging level
logging.basicConfig(format='%(levelname)s:%(message)s')
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if not args.server:
logger.error('Server name must be specified on the command line or in environment ENCODE_SERVER')
sys.exit(1)
if not args.authid or not args.authpw:
logger.error('Authorization keypair must be specified on the command line or in environment ENCODE_AUTHID, ENCODE_AUTHPW')
sys.exit(1)
if not os.path.isdir(args.encvaldata):
logger.error('No ENCODE validation data. git clone https://github.com/ENCODE-DCC/encValData.git')
sys.exit(1)
return args
def md5(path):
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
return md5sum.hexdigest()
# This does not depend on hashlib
# if 'md5_command' not in globals():
# global md5_command
# if subprocess.check_output('which md5', shell=True):
# md5_command = 'md5 -q'
# elif subprocess.check_output('which md5sum', shell=True):
# md5_command = 'md5sum'
# else:
# md5_command = ''
# if not md5_command:
# logger.error("No MD5 command found (tried md5 and md5sum)")
# return None
# else:
# try:
# md5_output = subprocess.check_output(' '.join([md5_command, fn]), shell=True)
# except:
# return None
# else:
# return md5_output.partition(' ')[0].rstrip()
def test_encode_keys(server,keypair):
test_URI = "ENCBS000AAA"
url = urlparse.urljoin(server,test_URI)
r = requests.get(url, auth=keypair, headers=GET_HEADERS)
try:
r.raise_for_status()
except:
logger.debug('test_encode_keys got response %s' %(r.text))
return False
else:
return True
def input_csv(fh):
csv_args = CSV_ARGS
input_fieldnames = csv.reader(fh, **csv_args).next()
return csv.DictReader(fh, fieldnames=input_fieldnames, **csv_args)
def output_csv(fh,fieldnames):
csv_args = CSV_ARGS
additional_fields = ['accession','aws_return']
output_fieldnames = [fn for fn in fieldnames if fn] + additional_fields
output = csv.DictWriter(fh, fieldnames=output_fieldnames, **csv_args)
return output
def init_csvs(in_fh,out_fh):
input_reader = input_csv(in_fh)
output_writer = output_csv(out_fh,input_reader.fieldnames)
return input_reader, output_writer
def validate_file(f_obj, encValData, assembly=None, as_path=None):
path = f_obj.get('submitted_file_name')
file_format = f_obj.get('file_format')
file_format_type = f_obj.get('file_format_type')
output_type = f_obj.get('output_type')
gzip_types = [
"CEL",
"bam",
"bed",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if file_format in gzip_types:
if not is_gzipped:
logger.warning('%s: Expect %s format to be gzipped' %(path,file_format))
else:
if is_gzipped:
logger.warning('%s: Expect %s format to be un-gzipped' %(path,file_format))
if assembly:
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
else:
chromInfo = None
if as_path:
as_file = '-as=%s' %(as_path)
else:
as_file = None
validate_map = {
('fasta', None): ['-type=fasta'],
('fastq', None): ['-type=fastq'],
('bam', None): ['-type=bam', chromInfo],
('bigWig', None): ['-type=bigWig', chromInfo],
('bed', 'bed3'): ['-type=bed3', chromInfo],
('bigBed', 'bed3'): ['-type=bed3', chromInfo],
('bed', 'bed3+'): ['-type=bed3+', chromInfo],
('bigBed', 'bed3+'): ['-type=bed3+', chromInfo],
('bed', 'bed6'): ['-type=bed6+', chromInfo],
('bigBed', 'bed6'): ['-type=bigBed6+', chromInfo],
('bed', 'bedLogR'): ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bigBed', 'bedLogR'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
('bed', 'bedMethyl'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bigBed', 'bedMethyl'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
('bed', 'broadPeak'): ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bigBed', 'broadPeak'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
('bed', 'gappedPeak'): ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bigBed', 'gappedPeak'): ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
('bed', 'narrowPeak'): ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bigBed', 'narrowPeak'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
('bed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bigBed', 'bedRnaElements'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
('bed', 'bedExonScore'): ['-type=bed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bigBed', 'bedExonScore'): ['-type=bigBed6+3', chromInfo, '-as=%s/as/bedExonScore.as' % encValData],
('bed', 'bedRrbs'): ['-type=bed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bigBed', 'bedRrbs'): ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedRrbs.as' % encValData],
('bed', 'enhancerAssay'): ['-type=bed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bigBed', 'enhancerAssay'): ['-type=bigBed9+1', chromInfo, '-as=%s/as/enhancerAssay.as' % encValData],
('bed', 'modPepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bigBed', 'modPepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/modPepMap.as' % encValData],
('bed', 'pepMap'): ['-type=bed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bigBed', 'pepMap'): ['-type=bigBed9+7', chromInfo, '-as=%s/as/pepMap.as' % encValData],
('bed', 'openChromCombinedPeaks'): ['-type=bed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bigBed', 'openChromCombinedPeaks'): ['-type=bigBed9+12', chromInfo, '-as=%s/as/openChromCombinedPeaks.as' % encValData],
('bed', 'peptideMapping'): ['-type=bed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bigBed', 'peptideMapping'): ['-type=bigBed6+4', chromInfo, '-as=%s/as/peptideMapping.as' % encValData],
('bed', 'shortFrags'): ['-type=bed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('bigBed', 'shortFrags'): ['-type=bigBed6+21', chromInfo, '-as=%s/as/shortFrags.as' % encValData],
('rcc', None): ['-type=rcc'],
('idat', None): ['-type=idat'],
('bedpe', None): ['-type=bed3+', chromInfo],
('bedpe', 'mango'): ['-type=bed3+', chromInfo],
('gtf', None): None,
('tar', None): None,
('tsv', None): None,
('csv', None): None,
('2bit', None): None,
('csfasta', None): ['-type=csfasta'],
('csqual', None): ['-type=csqual'],
('CEL', None): None,
('sam', None): None,
('wig', None): None,
('hdf5', None): None,
('gff', None): None
}
validate_args = validate_map.get((file_format, file_format_type))
if validate_args is None:
logger.warning('No rules to validate file_format %s and file_format_type %s' %(file_format, file_format_type))
return False
if (file_format, file_format_type) in [('bed', 'bed3'), ('bed', 'bed3+')] and as_file: #TODO: Update file schema and change to bed3+
		validate_args = ['-type=bed3+', chromInfo] #TODO: Update file schema. This is to force bed3+ for validateFiles but pass bed3 to file_format_type
		validate_args.append(as_file)
tokens = ['validateFiles'] + validate_args + [path]
logger.debug('Running: %s' %(tokens))
try:
subprocess.check_output(tokens)
except subprocess.CalledProcessError as e:
logger.error("validateFiles returned %s" %(e.output))
return False
else:
logger.debug("%s: validateFiles passed" %(path))
return True
def post_file(file_metadata, server, keypair, dryrun=False):
local_path = file_metadata.get('submitted_file_name')
if not file_metadata.get('md5sum'):
file_metadata['md5sum'] = md5(local_path)
try:
logger.debug("POST JSON: %s" %(json.dumps(file_metadata)))
except:
pass
if dryrun:
file_obj = copy.copy(file_metadata)
file_obj.update({'accession':None})
return file_obj
else:
url = urlparse.urljoin(server,'/files/')
r = requests.post(url, auth=keypair, headers=POST_HEADERS, data=json.dumps(file_metadata))
try:
r.raise_for_status()
except:
logger.warning('POST failed: %s %s' %(r.status_code, r.reason))
logger.warning(r.text)
return None
else:
return r.json()['@graph'][0]
def upload_file(file_obj, dryrun=False):
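	# Copy the local file to the signed S3 location from upload_credentials using the aws CLI and return its exit code (None in dryrun mode).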
if dryrun:
return None
else:
creds = file_obj['upload_credentials']
logger.debug('AWS creds: %s' %(creds))
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
path = file_obj.get('submitted_file_name')
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
logger.error("AWS upload failed with exit code %d" %(e.returncode))
return e.returncode
else:
return 0
def get_asfile(uri_json, server, keypair):
try:
uris = json.loads(uri_json)
except:
logger.error("Could not parse as JSON: %s" %(uri_json))
return None
for uri in uris:
url = server + '/' + uri
r = requests.get(url, headers=GET_HEADERS, auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to get ENCODE object %s" %(uri))
return None
document_obj = r.json()
r = requests.get(urlparse.urljoin(server, document_obj['uuid'] + '/' + document_obj['attachment']['href']), auth=keypair)
try:
r.raise_for_status()
except:
logger.error("Failed to download ENCODE document %s" %(uri))
return None
f = tempfile.NamedTemporaryFile(delete=False)
f.write(r.text)
return f
def process_row(row):
json_payload = {}
for key,value in row.iteritems():
if not key:
continue
try:
json_payload.update({key:json.loads(value)})
except: | return None
return json_payload
def main():
args = get_args()
server = args.server
keypair = (args.authid, args.authpw)
if not test_encode_keys(server, keypair):
logger.error("Invalid ENCODE server or keys: server=%s authid=%s authpw=%s" %(args.server,args.authid,args.authpw))
sys.exit(1)
try:
subprocess.check_output('which validateFiles', shell=True)
except:
logger.error("validateFiles is not in path. See http://hgdownload.cse.ucsc.edu/admin/exe/")
sys.exit(1)
input_csv, output_csv = init_csvs(args.infile, args.outfile)
output_csv.writeheader()
for n,row in enumerate(input_csv,start=2): #row 1 is the header
as_file = get_asfile(row.get('file_format_specifications'), server, keypair)
if as_file:
as_file.close() #validateFiles needs a closed file for -as, otherwise it gives a return code of -11
validated = validate_file(row, args.encvaldata, row.get('assembly'), as_file.name)
os.unlink(as_file.name)
else:
validated = validate_file(row, args.encvaldata, row.get('assembly'))
if not validated:
logger.warning('Skipping row %d: file %s failed validation' %(n,row['submitted_file_name']))
continue
json_payload = process_row(row)
if not json_payload:
logger.warning('Skipping row %d: invalid field format for JSON' %(n))
continue
file_object = post_file(json_payload, server, keypair, args.dryrun)
if not file_object:
logger.warning('Skipping row %d: POST file object failed' %(n))
continue
aws_return_code = upload_file(file_object, args.dryrun)
if aws_return_code:
			logger.warning('Row %d: Non-zero AWS upload return code %d' %(n,aws_return_code))
output_row = {}
for key in output_csv.fieldnames:
output_row.update({key:file_object.get(key)})
output_row.update({'aws_return':aws_return_code})
output_csv.writerow(output_row)
if __name__ == '__main__':
main() | try:
json_payload.update({key:json.loads('"%s"' %(value))})
except:
				logger.warning('Could not convert field %s value %s to JSON' %(key,value)) | random_line_split |
plugin.go | /*----------------------------------------------------------------
* Copyright (c) ThoughtWorks, Inc.
* Licensed under the Apache License, Version 2.0
* See LICENSE in the project root for license information.
*----------------------------------------------------------------*/
package plugin
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/getgauge/common"
"github.com/getgauge/gauge-proto/go/gauge_messages"
"github.com/getgauge/gauge/api/infoGatherer"
"github.com/getgauge/gauge/config"
"github.com/getgauge/gauge/conn"
"github.com/getgauge/gauge/gauge"
"github.com/getgauge/gauge/logger"
"github.com/getgauge/gauge/manifest"
"github.com/getgauge/gauge/plugin/pluginInfo"
"github.com/getgauge/gauge/version"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
type pluginScope string
const (
executionScope pluginScope = "execution"
docScope pluginScope = "documentation"
pluginConnectionPortEnv = "plugin_connection_port"
)
type plugin struct {
mutex *sync.Mutex
connection net.Conn
gRPCConn *grpc.ClientConn
ReporterClient gauge_messages.ReporterClient
DocumenterClient gauge_messages.DocumenterClient
pluginCmd *exec.Cmd
descriptor *PluginDescriptor
killTimer *time.Timer
}
func isProcessRunning(p *plugin) bool {
p.mutex.Lock()
ps := p.pluginCmd.ProcessState
p.mutex.Unlock()
return ps == nil || !ps.Exited()
}
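// killGrpcProcess asks the plugin to shut itself down over gRPC and force-kills the OS process if it is still running after the configured kill timeout.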
func (p *plugin) killGrpcProcess() error {
var m *gauge_messages.Empty
var err error
if p.ReporterClient != nil {
m, err = p.ReporterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
} else if p.DocumenterClient != nil {
m, err = p.DocumenterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
}
if m == nil || err != nil {
errStatus, _ := status.FromError(err)
if errStatus.Code() == codes.Unavailable {
// Ref https://www.grpc.io/docs/guides/error/#general-errors
// GRPC_STATUS_UNAVAILABLE is thrown when Server is shutting down. Ignore it here.
return nil
}
return err
}
if p.gRPCConn == nil && p.pluginCmd == nil {
return nil
}
defer p.gRPCConn.Close()
if isProcessRunning(p) {
exited := make(chan bool, 1)
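		// Poll the plugin process every 100ms and signal on the channel once it has exited.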
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case done := <-exited:
if done {
logger.Debugf(true, "Runner with PID:%d has exited", p.pluginCmd.Process.Pid)
return nil
}
case <-time.After(config.PluginKillTimeout()):
logger.Warningf(true, "Killing runner with PID:%d forcefully", p.pluginCmd.Process.Pid)
return p.pluginCmd.Process.Kill()
}
}
return nil
}
func (p *plugin) kill(wg *sync.WaitGroup) error {
defer wg.Done()
if p.gRPCConn != nil && p.ReporterClient != nil {
return p.killGrpcProcess()
}
if isProcessRunning(p) {
defer p.connection.Close()
p.killTimer = time.NewTimer(config.PluginKillTimeout())
err := conn.SendProcessKillMessage(p.connection)
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case <-exited:
if !p.killTimer.Stop() {
<-p.killTimer.C
}
logger.Debugf(true, "Plugin [%s] with pid [%d] has exited", p.descriptor.Name, p.pluginCmd.Process.Pid)
case <-p.killTimer.C:
logger.Warningf(true, "Plugin [%s] with pid [%d] did not exit after %.2f seconds. Forcefully killing it.", p.descriptor.Name, p.pluginCmd.Process.Pid, config.PluginKillTimeout().Seconds())
err := p.pluginCmd.Process.Kill()
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
return err
}
}
return nil
}
// IsPluginInstalled reports whether the given plugin, optionally at a specific version, is installed.
func IsPluginInstalled(pluginName, pluginVersion string) bool {
pluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return false
}
thisPluginDir := filepath.Join(pluginsInstallDir, pluginName)
if !common.DirExists(thisPluginDir) {
return false
}
if pluginVersion != "" {
return common.FileExists(filepath.Join(thisPluginDir, pluginVersion, common.PluginJSONFile))
}
return true
}
func getPluginJSONPath(pluginName, pluginVersion string) (string, error) {
if !IsPluginInstalled(pluginName, pluginVersion) {
plugin := strings.TrimSpace(fmt.Sprintf("%s %s", pluginName, pluginVersion))
return "", fmt.Errorf("Plugin %s is not installed", plugin)
}
pluginInstallDir, err := GetInstallDir(pluginName, "")
if err != nil {
return "", err
}
return filepath.Join(pluginInstallDir, common.PluginJSONFile), nil
}
// GetPluginDescriptor returns the information about the plugin, including its name, id and the commands used to start it.
func GetPluginDescriptor(pluginID, pluginVersion string) (*PluginDescriptor, error) {
pluginJSON, err := getPluginJSONPath(pluginID, pluginVersion)
if err != nil {
return nil, err
}
return GetPluginDescriptorFromJSON(pluginJSON)
}
func GetPluginDescriptorFromJSON(pluginJSON string) (*PluginDescriptor, error) {
pluginJSONContents, err := common.ReadFileContents(pluginJSON)
if err != nil {
return nil, err
}
var pd PluginDescriptor
if err = json.Unmarshal([]byte(pluginJSONContents), &pd); err != nil {
return nil, fmt.Errorf("%s: %s", pluginJSON, err.Error())
}
pd.pluginPath = filepath.Dir(pluginJSON)
return &pd, nil
}
func startPlugin(pd *PluginDescriptor, action pluginScope) (*plugin, error) {
var command []string
switch runtime.GOOS {
case "windows":
command = pd.Command.Windows
case "darwin":
command = pd.Command.Darwin
default:
command = pd.Command.Linux
}
if len(command) == 0 {
return nil, fmt.Errorf("Platform specific command not specified: %s.", runtime.GOOS)
}
if pd.hasCapability(gRPCSupportCapability) {
return startGRPCPlugin(pd, command)
}
return startLegacyPlugin(pd, command)
}
func startGRPCPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
portChan := make(chan string)
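	// The plugin is expected to print its gRPC port on stdout/stderr; the custom log writers watch that output and deliver the port on portChan.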
writer := &logger.LogWriter{
Stderr: logger.NewCustomWriter(portChan, os.Stderr, pd.ID, true),
Stdout: logger.NewCustomWriter(portChan, os.Stdout, pd.ID, false),
}
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
go func() {
err = cmd.Wait()
if err != nil {
logger.Errorf(true, "Error occurred while waiting for plugin process to finish.\nError : %s", err.Error())
}
}()
if err != nil {
return nil, err
}
var port string
select {
case port = <-portChan:
close(portChan)
case <-time.After(config.PluginConnectionTimeout()):
return nil, fmt.Errorf("timed out connecting to %s", pd.ID)
}
logger.Debugf(true, "Attempting to connect to grpc server at port: %s", port)
gRPCConn, err := grpc.Dial(fmt.Sprintf("%s:%s", "127.0.0.1", port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
} | }
func startPluginsForExecution(m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID, plugin)
continue
}
pluginConnection, err := gaugeConnectionHandler.AcceptConnection(config.PluginConnectionTimeout(), make(chan error))
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. Failed to connect to plugin. %s", pd.Name, pd.Version, err.Error()))
err := plugin.pluginCmd.Process.Kill()
if err != nil {
logger.Errorf(false, "unable to kill plugin %s: %s", plugin.descriptor.Name, err.Error())
}
continue
}
logger.Debugf(true, "Established connection to %s plugin", pd.Name)
plugin.connection = pluginConnection
handler.addPlugin(pluginID, plugin)
}
}
return handler, warnings
}
func GenerateDoc(pluginName string, specDirs []string, startAPIFunc func([]string) int) {
pd, err := GetPluginDescriptor(pluginName, "")
if err != nil {
logger.Fatalf(true, "Error starting plugin %s. Failed to get plugin.json. %s. To install, run `gauge install %s`.", pluginName, err.Error(), pluginName)
}
if err := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport); err != nil {
logger.Fatalf(true, "Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion)
}
if !pd.hasScope(docScope) {
logger.Fatalf(true, "Invalid plugin name: %s, this plugin cannot generate documentation.", pd.Name)
}
var sources []string
for _, src := range specDirs {
path, _ := filepath.Abs(src)
sources = append(sources, path)
}
os.Setenv("GAUGE_SPEC_DIRS", strings.Join(sources, "||"))
os.Setenv("GAUGE_PROJECT_ROOT", config.ProjectRoot)
if pd.hasCapability(gRPCSupportCapability) {
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
_, err = p.DocumenterClient.GenerateDocs(context.Background(), getSpecDetails(specDirs))
grpcErr := p.killGrpcProcess()
if grpcErr != nil {
logger.Errorf(false, "Unable to kill plugin %s : %s", p.descriptor.Name, grpcErr.Error())
}
if err != nil {
logger.Fatalf(true, "Failed to generate docs. %s", err.Error())
}
} else {
port := startAPIFunc(specDirs)
err := os.Setenv(common.APIPortEnvVariableName, strconv.Itoa(port))
if err != nil {
logger.Fatalf(true, "Failed to set env GAUGE_API_PORT. %s", err.Error())
}
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
for isProcessRunning(p) {
}
}
}
func (p *plugin) invokeService(m *gauge_messages.Message) error {
ctx := context.Background()
var err error
switch m.GetMessageType() {
case gauge_messages.Message_SuiteExecutionResult:
_, err = p.ReporterClient.NotifySuiteResult(ctx, m.GetSuiteExecutionResult())
case gauge_messages.Message_ExecutionStarting:
_, err = p.ReporterClient.NotifyExecutionStarting(ctx, m.GetExecutionStartingRequest())
case gauge_messages.Message_ExecutionEnding:
_, err = p.ReporterClient.NotifyExecutionEnding(ctx, m.GetExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionEnding:
_, err = p.ReporterClient.NotifySpecExecutionEnding(ctx, m.GetSpecExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionStarting:
_, err = p.ReporterClient.NotifySpecExecutionStarting(ctx, m.GetSpecExecutionStartingRequest())
case gauge_messages.Message_ScenarioExecutionEnding:
_, err = p.ReporterClient.NotifyScenarioExecutionEnding(ctx, m.GetScenarioExecutionEndingRequest())
case gauge_messages.Message_ScenarioExecutionStarting:
_, err = p.ReporterClient.NotifyScenarioExecutionStarting(ctx, m.GetScenarioExecutionStartingRequest())
case gauge_messages.Message_StepExecutionEnding:
_, err = p.ReporterClient.NotifyStepExecutionEnding(ctx, m.GetStepExecutionEndingRequest())
case gauge_messages.Message_StepExecutionStarting:
_, err = p.ReporterClient.NotifyStepExecutionStarting(ctx, m.GetStepExecutionStartingRequest())
}
return err
}
func (p *plugin) sendMessage(message *gauge_messages.Message) error {
if p.gRPCConn != nil {
return p.invokeService(message)
}
messageID := common.GetUniqueID()
message.MessageId = messageID
messageBytes, err := proto.Marshal(message)
if err != nil {
return err
}
err = conn.Write(p.connection, messageBytes)
if err != nil {
return fmt.Errorf("[Warning] Failed to send message to plugin: %s %s", p.descriptor.ID, err.Error())
}
return nil
}
func StartPlugins(m *manifest.Manifest) Handler {
pluginHandler, warnings := startPluginsForExecution(m)
logger.HandleWarningMessages(true, warnings)
return pluginHandler
}
func PluginsWithoutScope() (infos []pluginInfo.PluginInfo) {
if plugins, err := pluginInfo.GetAllInstalledPluginsWithVersion(); err == nil {
for _, p := range plugins {
pd, err := GetPluginDescriptor(p.Name, p.Version.String())
if err == nil && !pd.hasAnyScope() {
infos = append(infos, p)
}
}
}
return
}
// GetInstallDir returns the install directory of the given plugin; if no version is given, the directory of the latest installed version is returned.
func GetInstallDir(pluginName, v string) (string, error) {
allPluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return "", err
}
pluginDir := filepath.Join(allPluginsInstallDir, pluginName)
if v != "" {
pluginDir = filepath.Join(pluginDir, v)
} else {
latestPlugin, err := pluginInfo.GetLatestInstalledPlugin(pluginDir)
if err != nil {
return "", err
}
pluginDir = latestPlugin.Path
}
return pluginDir, nil
}
func GetLanguageJSONFilePath(language string) (string, error) {
languageInstallDir, err := GetInstallDir(language, "")
if err != nil {
return "", err
}
languageJSON := filepath.Join(languageInstallDir, fmt.Sprintf("%s.json", language))
if !common.FileExists(languageJSON) {
return "", fmt.Errorf("Failed to find the implementation for: %s. %s does not exist.", language, languageJSON)
}
return languageJSON, nil
}
func IsLanguagePlugin(plugin string) bool {
if _, err := GetLanguageJSONFilePath(plugin); err != nil {
return false
}
return true
}
func QueryParams() string {
return fmt.Sprintf("?l=%s&p=%s&o=%s&a=%s", language(), plugins(), runtime.GOOS, runtime.GOARCH)
}
func language() string {
if config.ProjectRoot == "" {
return ""
}
m, err := manifest.ProjectManifest()
if err != nil {
return ""
}
return m.Language
}
func plugins() string {
pluginInfos, err := pluginInfo.GetAllInstalledPluginsWithVersion()
if err != nil {
return ""
}
var plugins []string
for _, p := range pluginInfos {
plugins = append(plugins, p.Name)
}
return strings.Join(plugins, ",")
}
func getSpecDetails(specDirs []string) *gauge_messages.SpecDetails {
sig := &infoGatherer.SpecInfoGatherer{SpecDirs: specDirs}
sig.Init()
specDetails := make([]*gauge_messages.SpecDetails_SpecDetail, 0)
for _, d := range sig.GetAvailableSpecDetails(specDirs) {
detail := &gauge_messages.SpecDetails_SpecDetail{}
if d.HasSpec() {
detail.Spec = gauge.ConvertToProtoSpec(d.Spec)
}
for _, e := range d.Errs {
detail.ParseErrors = append(detail.ParseErrors, &gauge_messages.Error{Type: gauge_messages.Error_PARSE_ERROR, Filename: e.FileName, Message: e.Message, LineNumber: int32(e.LineNo)})
}
specDetails = append(specDetails, detail)
}
return &gauge_messages.SpecDetails{
Details: specDetails,
}
} | return false | random_line_split |
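
The row above ends with the span that is missing at the pipe break inside IsPluginAdded ("return false"), followed by its split label ("random_line_split"). As a minimal sketch of how such a record can be put back together — the FIMRecord type and its field names are assumptions made purely for illustration, not part of the data above:

// Illustrative only: reassembling one fill-in-the-middle record by splicing
// the removed span back between the text before and after the break.
package main

import (
	"fmt"
	"strings"
)

type FIMRecord struct {
	FileName string // e.g. "plugin.go"
	Prefix   string // text before the removed span
	Suffix   string // text after the removed span
	Middle   string // the removed span, e.g. "return false"
	FIMType  string // e.g. "random_line_split"
}

// Reassemble restores the original source by putting the middle back in place.
func Reassemble(r FIMRecord) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	r := FIMRecord{
		FileName: "plugin.go",
		Prefix:   "\tfor _, pluginID := range m.Plugins {\n\t\t...\n\t}\n\t",
		Suffix:   "\n}\n",
		Middle:   "return false",
		FIMType:  "random_line_split",
	}
	joined := Reassemble(r)
	fmt.Println(strings.Contains(joined, r.Middle)) // true once the span is spliced back in
}
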
plugin.go | /*----------------------------------------------------------------
* Copyright (c) ThoughtWorks, Inc.
* Licensed under the Apache License, Version 2.0
* See LICENSE in the project root for license information.
*----------------------------------------------------------------*/
package plugin
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/getgauge/common"
"github.com/getgauge/gauge-proto/go/gauge_messages"
"github.com/getgauge/gauge/api/infoGatherer"
"github.com/getgauge/gauge/config"
"github.com/getgauge/gauge/conn"
"github.com/getgauge/gauge/gauge"
"github.com/getgauge/gauge/logger"
"github.com/getgauge/gauge/manifest"
"github.com/getgauge/gauge/plugin/pluginInfo"
"github.com/getgauge/gauge/version"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
type pluginScope string
const (
executionScope pluginScope = "execution"
docScope pluginScope = "documentation"
pluginConnectionPortEnv = "plugin_connection_port"
)
type plugin struct {
mutex *sync.Mutex
connection net.Conn
gRPCConn *grpc.ClientConn
ReporterClient gauge_messages.ReporterClient
DocumenterClient gauge_messages.DocumenterClient
pluginCmd *exec.Cmd
descriptor *PluginDescriptor
killTimer *time.Timer
}
func isProcessRunning(p *plugin) bool {
p.mutex.Lock()
ps := p.pluginCmd.ProcessState
p.mutex.Unlock()
return ps == nil || !ps.Exited()
}
func (p *plugin) killGrpcProcess() error {
var m *gauge_messages.Empty
var err error
if p.ReporterClient != nil {
m, err = p.ReporterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
} else if p.DocumenterClient != nil {
m, err = p.DocumenterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
}
if m == nil || err != nil {
errStatus, _ := status.FromError(err)
if errStatus.Code() == codes.Unavailable {
// Ref https://www.grpc.io/docs/guides/error/#general-errors
// GRPC_STATUS_UNAVAILABLE is returned when the server is shutting down. Ignore it here.
return nil
}
return err
}
if p.gRPCConn == nil && p.pluginCmd == nil {
return nil
}
defer p.gRPCConn.Close()
if isProcessRunning(p) {
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case done := <-exited:
if done {
logger.Debugf(true, "Runner with PID:%d has exited", p.pluginCmd.Process.Pid)
return nil
}
case <-time.After(config.PluginKillTimeout()):
logger.Warningf(true, "Killing runner with PID:%d forcefully", p.pluginCmd.Process.Pid)
return p.pluginCmd.Process.Kill()
}
}
return nil
}
func (p *plugin) kill(wg *sync.WaitGroup) error {
defer wg.Done()
if p.gRPCConn != nil && p.ReporterClient != nil {
return p.killGrpcProcess()
}
if isProcessRunning(p) {
defer p.connection.Close()
p.killTimer = time.NewTimer(config.PluginKillTimeout())
err := conn.SendProcessKillMessage(p.connection)
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case <-exited:
if !p.killTimer.Stop() {
<-p.killTimer.C
}
logger.Debugf(true, "Plugin [%s] with pid [%d] has exited", p.descriptor.Name, p.pluginCmd.Process.Pid)
case <-p.killTimer.C:
logger.Warningf(true, "Plugin [%s] with pid [%d] did not exit after %.2f seconds. Forcefully killing it.", p.descriptor.Name, p.pluginCmd.Process.Pid, config.PluginKillTimeout().Seconds())
err := p.pluginCmd.Process.Kill()
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
return err
}
}
return nil
}
// IsPluginInstalled checks whether the given plugin is installed, optionally at the specified version.
func IsPluginInstalled(pluginName, pluginVersion string) bool {
pluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return false
}
thisPluginDir := filepath.Join(pluginsInstallDir, pluginName)
if !common.DirExists(thisPluginDir) {
return false
}
if pluginVersion != "" {
return common.FileExists(filepath.Join(thisPluginDir, pluginVersion, common.PluginJSONFile))
}
return true
}
func getPluginJSONPath(pluginName, pluginVersion string) (string, error) {
if !IsPluginInstalled(pluginName, pluginVersion) {
plugin := strings.TrimSpace(fmt.Sprintf("%s %s", pluginName, pluginVersion))
return "", fmt.Errorf("Plugin %s is not installed", plugin)
}
pluginInstallDir, err := GetInstallDir(pluginName, "")
if err != nil {
return "", err
}
return filepath.Join(pluginInstallDir, common.PluginJSONFile), nil
}
// GetPluginDescriptor returns information about the plugin, including its name, ID, and the commands used to start it.
func GetPluginDescriptor(pluginID, pluginVersion string) (*PluginDescriptor, error) {
pluginJSON, err := getPluginJSONPath(pluginID, pluginVersion)
if err != nil {
return nil, err
}
return GetPluginDescriptorFromJSON(pluginJSON)
}
func GetPluginDescriptorFromJSON(pluginJSON string) (*PluginDescriptor, error) {
pluginJSONContents, err := common.ReadFileContents(pluginJSON)
if err != nil {
return nil, err
}
var pd PluginDescriptor
if err = json.Unmarshal([]byte(pluginJSONContents), &pd); err != nil {
return nil, fmt.Errorf("%s: %s", pluginJSON, err.Error())
}
pd.pluginPath = filepath.Dir(pluginJSON)
return &pd, nil
}
func startPlugin(pd *PluginDescriptor, action pluginScope) (*plugin, error) {
var command []string
switch runtime.GOOS {
case "windows":
command = pd.Command.Windows
case "darwin":
command = pd.Command.Darwin
default:
command = pd.Command.Linux
}
if len(command) == 0 {
return nil, fmt.Errorf("Platform specific command not specified: %s.", runtime.GOOS)
}
if pd.hasCapability(gRPCSupportCapability) {
return startGRPCPlugin(pd, command)
}
return startLegacyPlugin(pd, command)
}
func startGRPCPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
portChan := make(chan string)
writer := &logger.LogWriter{
Stderr: logger.NewCustomWriter(portChan, os.Stderr, pd.ID, true),
Stdout: logger.NewCustomWriter(portChan, os.Stdout, pd.ID, false),
}
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
go func() {
err = cmd.Wait()
if err != nil {
logger.Errorf(true, "Error occurred while waiting for plugin process to finish.\nError : %s", err.Error())
}
}()
if err != nil {
return nil, err
}
var port string
select {
case port = <-portChan:
close(portChan)
case <-time.After(config.PluginConnectionTimeout()):
return nil, fmt.Errorf("timed out connecting to %s", pd.ID)
}
logger.Debugf(true, "Attempting to connect to grpc server at port: %s", port)
gRPCConn, err := grpc.Dial(fmt.Sprintf("%s:%s", "127.0.0.1", port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
}
return false
}
func startPluginsForExecution(m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID, plugin)
continue
}
pluginConnection, err := gaugeConnectionHandler.AcceptConnection(config.PluginConnectionTimeout(), make(chan error))
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. Failed to connect to plugin. %s", pd.Name, pd.Version, err.Error()))
err := plugin.pluginCmd.Process.Kill()
if err != nil {
logger.Errorf(false, "unable to kill plugin %s: %s", plugin.descriptor.Name, err.Error())
}
continue
}
logger.Debugf(true, "Established connection to %s plugin", pd.Name)
plugin.connection = pluginConnection
handler.addPlugin(pluginID, plugin)
}
}
return handler, warnings
}
func GenerateDoc(pluginName string, specDirs []string, startAPIFunc func([]string) int) {
pd, err := GetPluginDescriptor(pluginName, "")
if err != nil {
logger.Fatalf(true, "Error starting plugin %s. Failed to get plugin.json. %s. To install, run `gauge install %s`.", pluginName, err.Error(), pluginName)
}
if err := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport); err != nil {
logger.Fatalf(true, "Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion)
}
if !pd.hasScope(docScope) {
logger.Fatalf(true, "Invalid plugin name: %s, this plugin cannot generate documentation.", pd.Name)
}
var sources []string
for _, src := range specDirs {
path, _ := filepath.Abs(src)
sources = append(sources, path)
}
os.Setenv("GAUGE_SPEC_DIRS", strings.Join(sources, "||"))
os.Setenv("GAUGE_PROJECT_ROOT", config.ProjectRoot)
if pd.hasCapability(gRPCSupportCapability) {
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
_, err = p.DocumenterClient.GenerateDocs(context.Background(), getSpecDetails(specDirs))
grpcErr := p.killGrpcProcess()
if grpcErr != nil {
logger.Errorf(false, "Unable to kill plugin %s : %s", p.descriptor.Name, grpcErr.Error())
}
if err != nil {
logger.Fatalf(true, "Failed to generate docs. %s", err.Error())
}
} else {
port := startAPIFunc(specDirs)
err := os.Setenv(common.APIPortEnvVariableName, strconv.Itoa(port))
if err != nil {
logger.Fatalf(true, "Failed to set env GAUGE_API_PORT. %s", err.Error())
}
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
for isProcessRunning(p) {
}
}
}
func (p *plugin) invokeService(m *gauge_messages.Message) error {
ctx := context.Background()
var err error
switch m.GetMessageType() {
case gauge_messages.Message_SuiteExecutionResult:
_, err = p.ReporterClient.NotifySuiteResult(ctx, m.GetSuiteExecutionResult())
case gauge_messages.Message_ExecutionStarting:
_, err = p.ReporterClient.NotifyExecutionStarting(ctx, m.GetExecutionStartingRequest())
case gauge_messages.Message_ExecutionEnding:
_, err = p.ReporterClient.NotifyExecutionEnding(ctx, m.GetExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionEnding:
_, err = p.ReporterClient.NotifySpecExecutionEnding(ctx, m.GetSpecExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionStarting:
_, err = p.ReporterClient.NotifySpecExecutionStarting(ctx, m.GetSpecExecutionStartingRequest())
case gauge_messages.Message_ScenarioExecutionEnding:
_, err = p.ReporterClient.NotifyScenarioExecutionEnding(ctx, m.GetScenarioExecutionEndingRequest())
case gauge_messages.Message_ScenarioExecutionStarting:
_, err = p.ReporterClient.NotifyScenarioExecutionStarting(ctx, m.GetScenarioExecutionStartingRequest())
case gauge_messages.Message_StepExecutionEnding:
_, err = p.ReporterClient.NotifyStepExecutionEnding(ctx, m.GetStepExecutionEndingRequest())
case gauge_messages.Message_StepExecutionStarting:
_, err = p.ReporterClient.NotifyStepExecutionStarting(ctx, m.GetStepExecutionStartingRequest())
}
return err
}
func (p *plugin) sendMessage(message *gauge_messages.Message) error |
func StartPlugins(m *manifest.Manifest) Handler {
pluginHandler, warnings := startPluginsForExecution(m)
logger.HandleWarningMessages(true, warnings)
return pluginHandler
}
func PluginsWithoutScope() (infos []pluginInfo.PluginInfo) {
if plugins, err := pluginInfo.GetAllInstalledPluginsWithVersion(); err == nil {
for _, p := range plugins {
pd, err := GetPluginDescriptor(p.Name, p.Version.String())
if err == nil && !pd.hasAnyScope() {
infos = append(infos, p)
}
}
}
return
}
// GetInstallDir returns the install directory of the given plugin; if no version is given, the directory of the latest installed version is returned.
func GetInstallDir(pluginName, v string) (string, error) {
allPluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return "", err
}
pluginDir := filepath.Join(allPluginsInstallDir, pluginName)
if v != "" {
pluginDir = filepath.Join(pluginDir, v)
} else {
latestPlugin, err := pluginInfo.GetLatestInstalledPlugin(pluginDir)
if err != nil {
return "", err
}
pluginDir = latestPlugin.Path
}
return pluginDir, nil
}
func GetLanguageJSONFilePath(language string) (string, error) {
languageInstallDir, err := GetInstallDir(language, "")
if err != nil {
return "", err
}
languageJSON := filepath.Join(languageInstallDir, fmt.Sprintf("%s.json", language))
if !common.FileExists(languageJSON) {
return "", fmt.Errorf("Failed to find the implementation for: %s. %s does not exist.", language, languageJSON)
}
return languageJSON, nil
}
func IsLanguagePlugin(plugin string) bool {
if _, err := GetLanguageJSONFilePath(plugin); err != nil {
return false
}
return true
}
func QueryParams() string {
return fmt.Sprintf("?l=%s&p=%s&o=%s&a=%s", language(), plugins(), runtime.GOOS, runtime.GOARCH)
}
func language() string {
if config.ProjectRoot == "" {
return ""
}
m, err := manifest.ProjectManifest()
if err != nil {
return ""
}
return m.Language
}
func plugins() string {
pluginInfos, err := pluginInfo.GetAllInstalledPluginsWithVersion()
if err != nil {
return ""
}
var plugins []string
for _, p := range pluginInfos {
plugins = append(plugins, p.Name)
}
return strings.Join(plugins, ",")
}
func getSpecDetails(specDirs []string) *gauge_messages.SpecDetails {
sig := &infoGatherer.SpecInfoGatherer{SpecDirs: specDirs}
sig.Init()
specDetails := make([]*gauge_messages.SpecDetails_SpecDetail, 0)
for _, d := range sig.GetAvailableSpecDetails(specDirs) {
detail := &gauge_messages.SpecDetails_SpecDetail{}
if d.HasSpec() {
detail.Spec = gauge.ConvertToProtoSpec(d.Spec)
}
for _, e := range d.Errs {
detail.ParseErrors = append(detail.ParseErrors, &gauge_messages.Error{Type: gauge_messages.Error_PARSE_ERROR, Filename: e.FileName, Message: e.Message, LineNumber: int32(e.LineNo)})
}
specDetails = append(specDetails, detail)
}
return &gauge_messages.SpecDetails{
Details: specDetails,
}
}
| {
if p.gRPCConn != nil {
return p.invokeService(message)
}
messageID := common.GetUniqueID()
message.MessageId = messageID
messageBytes, err := proto.Marshal(message)
if err != nil {
return err
}
err = conn.Write(p.connection, messageBytes)
if err != nil {
return fmt.Errorf("[Warning] Failed to send message to plugin: %s %s", p.descriptor.ID, err.Error())
}
return nil
} | identifier_body |
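
The descriptor that GetPluginDescriptorFromJSON above unmarshals drives everything else: which per-OS command startPlugin launches and whether the gRPC path is taken. The sketch below shows a hypothetical minimal plugin.json and the same per-OS selection; the JSON key names and sample values are assumptions inferred from the Go field accesses (ID, Name, Version, Command.Windows/Darwin/Linux), not an authoritative schema.

// Illustrative only: a local mirror of the descriptor fields read by the code
// above. The JSON keys and sample values are assumptions, not the real schema.
package main

import (
	"encoding/json"
	"fmt"
	"runtime"
)

type commandSet struct {
	Windows []string `json:"windows"`
	Darwin  []string `json:"darwin"`
	Linux   []string `json:"linux"`
}

type descriptor struct {
	ID      string     `json:"id"`
	Name    string     `json:"name"`
	Version string     `json:"version"`
	Command commandSet `json:"command"`
}

const samplePluginJSON = `{
  "id": "html-report",
  "name": "Html Report",
  "version": "4.1.0",
  "command": {
    "windows": ["bin\\html-report.exe"],
    "darwin": ["bin/html-report"],
    "linux": ["bin/html-report"]
  }
}`

func main() {
	var d descriptor
	if err := json.Unmarshal([]byte(samplePluginJSON), &d); err != nil {
		panic(err)
	}
	// Same per-OS command selection idea as startPlugin above.
	var cmd []string
	switch runtime.GOOS {
	case "windows":
		cmd = d.Command.Windows
	case "darwin":
		cmd = d.Command.Darwin
	default:
		cmd = d.Command.Linux
	}
	fmt.Println(d.ID, d.Version, cmd)
}
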
plugin.go | /*----------------------------------------------------------------
* Copyright (c) ThoughtWorks, Inc.
* Licensed under the Apache License, Version 2.0
* See LICENSE in the project root for license information.
*----------------------------------------------------------------*/
package plugin
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/getgauge/common"
"github.com/getgauge/gauge-proto/go/gauge_messages"
"github.com/getgauge/gauge/api/infoGatherer"
"github.com/getgauge/gauge/config"
"github.com/getgauge/gauge/conn"
"github.com/getgauge/gauge/gauge"
"github.com/getgauge/gauge/logger"
"github.com/getgauge/gauge/manifest"
"github.com/getgauge/gauge/plugin/pluginInfo"
"github.com/getgauge/gauge/version"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
type pluginScope string
const (
executionScope pluginScope = "execution"
docScope pluginScope = "documentation"
pluginConnectionPortEnv = "plugin_connection_port"
)
type plugin struct {
mutex *sync.Mutex
connection net.Conn
gRPCConn *grpc.ClientConn
ReporterClient gauge_messages.ReporterClient
DocumenterClient gauge_messages.DocumenterClient
pluginCmd *exec.Cmd
descriptor *PluginDescriptor
killTimer *time.Timer
}
func isProcessRunning(p *plugin) bool {
p.mutex.Lock()
ps := p.pluginCmd.ProcessState
p.mutex.Unlock()
return ps == nil || !ps.Exited()
}
func (p *plugin) killGrpcProcess() error {
var m *gauge_messages.Empty
var err error
if p.ReporterClient != nil {
m, err = p.ReporterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
} else if p.DocumenterClient != nil {
m, err = p.DocumenterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
}
if m == nil || err != nil {
errStatus, _ := status.FromError(err)
if errStatus.Code() == codes.Unavailable {
// Ref https://www.grpc.io/docs/guides/error/#general-errors
// GRPC_STATUS_UNAVAILABLE is returned when the server is shutting down. Ignore it here.
return nil
}
return err
}
if p.gRPCConn == nil && p.pluginCmd == nil {
return nil
}
defer p.gRPCConn.Close()
if isProcessRunning(p) {
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case done := <-exited:
if done {
logger.Debugf(true, "Runner with PID:%d has exited", p.pluginCmd.Process.Pid)
return nil
}
case <-time.After(config.PluginKillTimeout()):
logger.Warningf(true, "Killing runner with PID:%d forcefully", p.pluginCmd.Process.Pid)
return p.pluginCmd.Process.Kill()
}
}
return nil
}
func (p *plugin) kill(wg *sync.WaitGroup) error {
defer wg.Done()
if p.gRPCConn != nil && p.ReporterClient != nil {
return p.killGrpcProcess()
}
if isProcessRunning(p) {
defer p.connection.Close()
p.killTimer = time.NewTimer(config.PluginKillTimeout())
err := conn.SendProcessKillMessage(p.connection)
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case <-exited:
if !p.killTimer.Stop() {
<-p.killTimer.C
}
logger.Debugf(true, "Plugin [%s] with pid [%d] has exited", p.descriptor.Name, p.pluginCmd.Process.Pid)
case <-p.killTimer.C:
logger.Warningf(true, "Plugin [%s] with pid [%d] did not exit after %.2f seconds. Forcefully killing it.", p.descriptor.Name, p.pluginCmd.Process.Pid, config.PluginKillTimeout().Seconds())
err := p.pluginCmd.Process.Kill()
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
return err
}
}
return nil
}
// IsPluginInstalled checks whether the given plugin is installed, optionally at the specified version.
func IsPluginInstalled(pluginName, pluginVersion string) bool {
pluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return false
}
thisPluginDir := filepath.Join(pluginsInstallDir, pluginName)
if !common.DirExists(thisPluginDir) {
return false
}
if pluginVersion != "" {
return common.FileExists(filepath.Join(thisPluginDir, pluginVersion, common.PluginJSONFile))
}
return true
}
func getPluginJSONPath(pluginName, pluginVersion string) (string, error) {
if !IsPluginInstalled(pluginName, pluginVersion) {
plugin := strings.TrimSpace(fmt.Sprintf("%s %s", pluginName, pluginVersion))
return "", fmt.Errorf("Plugin %s is not installed", plugin)
}
pluginInstallDir, err := GetInstallDir(pluginName, "")
if err != nil {
return "", err
}
return filepath.Join(pluginInstallDir, common.PluginJSONFile), nil
}
// GetPluginDescriptor returns information about the plugin, including its name, ID, and the commands used to start it.
func GetPluginDescriptor(pluginID, pluginVersion string) (*PluginDescriptor, error) {
pluginJSON, err := getPluginJSONPath(pluginID, pluginVersion)
if err != nil {
return nil, err
}
return GetPluginDescriptorFromJSON(pluginJSON)
}
func GetPluginDescriptorFromJSON(pluginJSON string) (*PluginDescriptor, error) {
pluginJSONContents, err := common.ReadFileContents(pluginJSON)
if err != nil {
return nil, err
}
var pd PluginDescriptor
if err = json.Unmarshal([]byte(pluginJSONContents), &pd); err != nil {
return nil, fmt.Errorf("%s: %s", pluginJSON, err.Error())
}
pd.pluginPath = filepath.Dir(pluginJSON)
return &pd, nil
}
func startPlugin(pd *PluginDescriptor, action pluginScope) (*plugin, error) {
var command []string
switch runtime.GOOS {
case "windows":
command = pd.Command.Windows
case "darwin":
command = pd.Command.Darwin
default:
command = pd.Command.Linux
}
if len(command) == 0 {
return nil, fmt.Errorf("Platform specific command not specified: %s.", runtime.GOOS)
}
if pd.hasCapability(gRPCSupportCapability) {
return startGRPCPlugin(pd, command)
}
return startLegacyPlugin(pd, command)
}
func startGRPCPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
portChan := make(chan string)
writer := &logger.LogWriter{
Stderr: logger.NewCustomWriter(portChan, os.Stderr, pd.ID, true),
Stdout: logger.NewCustomWriter(portChan, os.Stdout, pd.ID, false),
}
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
go func() {
err = cmd.Wait()
if err != nil {
logger.Errorf(true, "Error occurred while waiting for plugin process to finish.\nError : %s", err.Error())
}
}()
if err != nil {
return nil, err
}
var port string
select {
case port = <-portChan:
close(portChan)
case <-time.After(config.PluginConnectionTimeout()):
return nil, fmt.Errorf("timed out connecting to %s", pd.ID)
}
logger.Debugf(true, "Attempting to connect to grpc server at port: %s", port)
gRPCConn, err := grpc.Dial(fmt.Sprintf("%s:%s", "127.0.0.1", port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
}
return false
}
func startPluginsForExecution(m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID, plugin)
continue
}
pluginConnection, err := gaugeConnectionHandler.AcceptConnection(config.PluginConnectionTimeout(), make(chan error))
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. Failed to connect to plugin. %s", pd.Name, pd.Version, err.Error()))
err := plugin.pluginCmd.Process.Kill()
if err != nil {
logger.Errorf(false, "unable to kill plugin %s: %s", plugin.descriptor.Name, err.Error())
}
continue
}
logger.Debugf(true, "Established connection to %s plugin", pd.Name)
plugin.connection = pluginConnection
handler.addPlugin(pluginID, plugin)
}
}
return handler, warnings
}
func GenerateDoc(pluginName string, specDirs []string, startAPIFunc func([]string) int) {
pd, err := GetPluginDescriptor(pluginName, "")
if err != nil {
logger.Fatalf(true, "Error starting plugin %s. Failed to get plugin.json. %s. To install, run `gauge install %s`.", pluginName, err.Error(), pluginName)
}
if err := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport); err != nil {
logger.Fatalf(true, "Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion)
}
if !pd.hasScope(docScope) {
logger.Fatalf(true, "Invalid plugin name: %s, this plugin cannot generate documentation.", pd.Name)
}
var sources []string
for _, src := range specDirs {
path, _ := filepath.Abs(src)
sources = append(sources, path)
}
os.Setenv("GAUGE_SPEC_DIRS", strings.Join(sources, "||"))
os.Setenv("GAUGE_PROJECT_ROOT", config.ProjectRoot)
if pd.hasCapability(gRPCSupportCapability) {
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
_, err = p.DocumenterClient.GenerateDocs(context.Background(), getSpecDetails(specDirs))
grpcErr := p.killGrpcProcess()
if grpcErr != nil {
logger.Errorf(false, "Unable to kill plugin %s : %s", p.descriptor.Name, grpcErr.Error())
}
if err != nil {
logger.Fatalf(true, "Failed to generate docs. %s", err.Error())
}
} else {
port := startAPIFunc(specDirs)
err := os.Setenv(common.APIPortEnvVariableName, strconv.Itoa(port))
if err != nil {
logger.Fatalf(true, "Failed to set env GAUGE_API_PORT. %s", err.Error())
}
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
for isProcessRunning(p) {
}
}
}
func (p *plugin) invokeService(m *gauge_messages.Message) error {
ctx := context.Background()
var err error
switch m.GetMessageType() {
case gauge_messages.Message_SuiteExecutionResult:
_, err = p.ReporterClient.NotifySuiteResult(ctx, m.GetSuiteExecutionResult())
case gauge_messages.Message_ExecutionStarting:
_, err = p.ReporterClient.NotifyExecutionStarting(ctx, m.GetExecutionStartingRequest())
case gauge_messages.Message_ExecutionEnding:
_, err = p.ReporterClient.NotifyExecutionEnding(ctx, m.GetExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionEnding:
_, err = p.ReporterClient.NotifySpecExecutionEnding(ctx, m.GetSpecExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionStarting:
_, err = p.ReporterClient.NotifySpecExecutionStarting(ctx, m.GetSpecExecutionStartingRequest())
case gauge_messages.Message_ScenarioExecutionEnding:
_, err = p.ReporterClient.NotifyScenarioExecutionEnding(ctx, m.GetScenarioExecutionEndingRequest())
case gauge_messages.Message_ScenarioExecutionStarting:
_, err = p.ReporterClient.NotifyScenarioExecutionStarting(ctx, m.GetScenarioExecutionStartingRequest())
case gauge_messages.Message_StepExecutionEnding:
_, err = p.ReporterClient.NotifyStepExecutionEnding(ctx, m.GetStepExecutionEndingRequest())
case gauge_messages.Message_StepExecutionStarting:
_, err = p.ReporterClient.NotifyStepExecutionStarting(ctx, m.GetStepExecutionStartingRequest())
}
return err
}
func (p *plugin) sendMessage(message *gauge_messages.Message) error {
if p.gRPCConn != nil {
return p.invokeService(message)
}
messageID := common.GetUniqueID()
message.MessageId = messageID
messageBytes, err := proto.Marshal(message)
if err != nil {
return err
}
err = conn.Write(p.connection, messageBytes)
if err != nil {
return fmt.Errorf("[Warning] Failed to send message to plugin: %s %s", p.descriptor.ID, err.Error())
}
return nil
}
func StartPlugins(m *manifest.Manifest) Handler {
pluginHandler, warnings := startPluginsForExecution(m)
logger.HandleWarningMessages(true, warnings)
return pluginHandler
}
func PluginsWithoutScope() (infos []pluginInfo.PluginInfo) {
if plugins, err := pluginInfo.GetAllInstalledPluginsWithVersion(); err == nil {
for _, p := range plugins {
pd, err := GetPluginDescriptor(p.Name, p.Version.String())
if err == nil && !pd.hasAnyScope() |
}
}
return
}
// GetInstallDir returns the install directory of the given plugin; if no version is given, the directory of the latest installed version is returned.
func GetInstallDir(pluginName, v string) (string, error) {
allPluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return "", err
}
pluginDir := filepath.Join(allPluginsInstallDir, pluginName)
if v != "" {
pluginDir = filepath.Join(pluginDir, v)
} else {
latestPlugin, err := pluginInfo.GetLatestInstalledPlugin(pluginDir)
if err != nil {
return "", err
}
pluginDir = latestPlugin.Path
}
return pluginDir, nil
}
func GetLanguageJSONFilePath(language string) (string, error) {
languageInstallDir, err := GetInstallDir(language, "")
if err != nil {
return "", err
}
languageJSON := filepath.Join(languageInstallDir, fmt.Sprintf("%s.json", language))
if !common.FileExists(languageJSON) {
return "", fmt.Errorf("Failed to find the implementation for: %s. %s does not exist.", language, languageJSON)
}
return languageJSON, nil
}
func IsLanguagePlugin(plugin string) bool {
if _, err := GetLanguageJSONFilePath(plugin); err != nil {
return false
}
return true
}
func QueryParams() string {
return fmt.Sprintf("?l=%s&p=%s&o=%s&a=%s", language(), plugins(), runtime.GOOS, runtime.GOARCH)
}
func language() string {
if config.ProjectRoot == "" {
return ""
}
m, err := manifest.ProjectManifest()
if err != nil {
return ""
}
return m.Language
}
func plugins() string {
pluginInfos, err := pluginInfo.GetAllInstalledPluginsWithVersion()
if err != nil {
return ""
}
var plugins []string
for _, p := range pluginInfos {
plugins = append(plugins, p.Name)
}
return strings.Join(plugins, ",")
}
func getSpecDetails(specDirs []string) *gauge_messages.SpecDetails {
sig := &infoGatherer.SpecInfoGatherer{SpecDirs: specDirs}
sig.Init()
specDetails := make([]*gauge_messages.SpecDetails_SpecDetail, 0)
for _, d := range sig.GetAvailableSpecDetails(specDirs) {
detail := &gauge_messages.SpecDetails_SpecDetail{}
if d.HasSpec() {
detail.Spec = gauge.ConvertToProtoSpec(d.Spec)
}
for _, e := range d.Errs {
detail.ParseErrors = append(detail.ParseErrors, &gauge_messages.Error{Type: gauge_messages.Error_PARSE_ERROR, Filename: e.FileName, Message: e.Message, LineNumber: int32(e.LineNo)})
}
specDetails = append(specDetails, detail)
}
return &gauge_messages.SpecDetails{
Details: specDetails,
}
}
| {
infos = append(infos, p)
} | conditional_block |
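
Both kill paths above follow the same idea: wait for the plugin process to exit, but only up to a configured timeout, then force-kill it. A self-contained reduction of that pattern is sketched below; it collapses the polling goroutine into a direct cmd.Wait and assumes a Unix-like system for the sample child process.

// Illustrative only: the wait-with-timeout idea used by kill and
// killGrpcProcess above, reduced to a plain *exec.Cmd.
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// waitOrKill waits for the command to exit; if it does not exit within
// timeout, the process is force-killed (mirroring the timer branch above).
func waitOrKill(cmd *exec.Cmd, timeout time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return cmd.Process.Kill()
	}
}

func main() {
	cmd := exec.Command("sleep", "1") // assumes a Unix-like system
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	if err := waitOrKill(cmd, 3*time.Second); err != nil {
		fmt.Println("wait/kill:", err)
		return
	}
	fmt.Println("process exited cleanly")
}
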
plugin.go | /*----------------------------------------------------------------
* Copyright (c) ThoughtWorks, Inc.
* Licensed under the Apache License, Version 2.0
* See LICENSE in the project root for license information.
*----------------------------------------------------------------*/
package plugin
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/getgauge/common"
"github.com/getgauge/gauge-proto/go/gauge_messages"
"github.com/getgauge/gauge/api/infoGatherer"
"github.com/getgauge/gauge/config"
"github.com/getgauge/gauge/conn"
"github.com/getgauge/gauge/gauge"
"github.com/getgauge/gauge/logger"
"github.com/getgauge/gauge/manifest"
"github.com/getgauge/gauge/plugin/pluginInfo"
"github.com/getgauge/gauge/version"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
type pluginScope string
const (
executionScope pluginScope = "execution"
docScope pluginScope = "documentation"
pluginConnectionPortEnv = "plugin_connection_port"
)
type plugin struct {
mutex *sync.Mutex
connection net.Conn
gRPCConn *grpc.ClientConn
ReporterClient gauge_messages.ReporterClient
DocumenterClient gauge_messages.DocumenterClient
pluginCmd *exec.Cmd
descriptor *PluginDescriptor
killTimer *time.Timer
}
func isProcessRunning(p *plugin) bool {
p.mutex.Lock()
ps := p.pluginCmd.ProcessState
p.mutex.Unlock()
return ps == nil || !ps.Exited()
}
func (p *plugin) killGrpcProcess() error {
var m *gauge_messages.Empty
var err error
if p.ReporterClient != nil {
m, err = p.ReporterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
} else if p.DocumenterClient != nil {
m, err = p.DocumenterClient.Kill(context.Background(), &gauge_messages.KillProcessRequest{})
}
if m == nil || err != nil {
errStatus, _ := status.FromError(err)
if errStatus.Code() == codes.Unavailable {
// Ref https://www.grpc.io/docs/guides/error/#general-errors
// GRPC_STATUS_UNAVAILABLE is returned when the server is shutting down. Ignore it here.
return nil
}
return err
}
if p.gRPCConn == nil && p.pluginCmd == nil {
return nil
}
defer p.gRPCConn.Close()
if isProcessRunning(p) {
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case done := <-exited:
if done {
logger.Debugf(true, "Runner with PID:%d has exited", p.pluginCmd.Process.Pid)
return nil
}
case <-time.After(config.PluginKillTimeout()):
logger.Warningf(true, "Killing runner with PID:%d forcefully", p.pluginCmd.Process.Pid)
return p.pluginCmd.Process.Kill()
}
}
return nil
}
func (p *plugin) kill(wg *sync.WaitGroup) error {
defer wg.Done()
if p.gRPCConn != nil && p.ReporterClient != nil {
return p.killGrpcProcess()
}
if isProcessRunning(p) {
defer p.connection.Close()
p.killTimer = time.NewTimer(config.PluginKillTimeout())
err := conn.SendProcessKillMessage(p.connection)
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
exited := make(chan bool, 1)
go func() {
for {
if isProcessRunning(p) {
time.Sleep(100 * time.Millisecond)
} else {
exited <- true
return
}
}
}()
select {
case <-exited:
if !p.killTimer.Stop() {
<-p.killTimer.C
}
logger.Debugf(true, "Plugin [%s] with pid [%d] has exited", p.descriptor.Name, p.pluginCmd.Process.Pid)
case <-p.killTimer.C:
logger.Warningf(true, "Plugin [%s] with pid [%d] did not exit after %.2f seconds. Forcefully killing it.", p.descriptor.Name, p.pluginCmd.Process.Pid, config.PluginKillTimeout().Seconds())
err := p.pluginCmd.Process.Kill()
if err != nil {
logger.Warningf(true, "Error while killing plugin %s : %s ", p.descriptor.Name, err.Error())
}
return err
}
}
return nil
}
// IsPluginInstalled checks whether the given plugin is installed, optionally at the specified version.
func IsPluginInstalled(pluginName, pluginVersion string) bool {
pluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return false
}
thisPluginDir := filepath.Join(pluginsInstallDir, pluginName)
if !common.DirExists(thisPluginDir) {
return false
}
if pluginVersion != "" {
return common.FileExists(filepath.Join(thisPluginDir, pluginVersion, common.PluginJSONFile))
}
return true
}
func getPluginJSONPath(pluginName, pluginVersion string) (string, error) {
if !IsPluginInstalled(pluginName, pluginVersion) {
plugin := strings.TrimSpace(fmt.Sprintf("%s %s", pluginName, pluginVersion))
return "", fmt.Errorf("Plugin %s is not installed", plugin)
}
pluginInstallDir, err := GetInstallDir(pluginName, "")
if err != nil {
return "", err
}
return filepath.Join(pluginInstallDir, common.PluginJSONFile), nil
}
// GetPluginDescriptor returns information about the plugin, including its name, ID, and the commands used to start it.
func GetPluginDescriptor(pluginID, pluginVersion string) (*PluginDescriptor, error) {
pluginJSON, err := getPluginJSONPath(pluginID, pluginVersion)
if err != nil {
return nil, err
}
return GetPluginDescriptorFromJSON(pluginJSON)
}
func GetPluginDescriptorFromJSON(pluginJSON string) (*PluginDescriptor, error) {
pluginJSONContents, err := common.ReadFileContents(pluginJSON)
if err != nil {
return nil, err
}
var pd PluginDescriptor
if err = json.Unmarshal([]byte(pluginJSONContents), &pd); err != nil {
return nil, fmt.Errorf("%s: %s", pluginJSON, err.Error())
}
pd.pluginPath = filepath.Dir(pluginJSON)
return &pd, nil
}
func startPlugin(pd *PluginDescriptor, action pluginScope) (*plugin, error) {
var command []string
switch runtime.GOOS {
case "windows":
command = pd.Command.Windows
case "darwin":
command = pd.Command.Darwin
default:
command = pd.Command.Linux
}
if len(command) == 0 {
return nil, fmt.Errorf("Platform specific command not specified: %s.", runtime.GOOS)
}
if pd.hasCapability(gRPCSupportCapability) {
return startGRPCPlugin(pd, command)
}
return startLegacyPlugin(pd, command)
}
func startGRPCPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
portChan := make(chan string)
writer := &logger.LogWriter{
Stderr: logger.NewCustomWriter(portChan, os.Stderr, pd.ID, true),
Stdout: logger.NewCustomWriter(portChan, os.Stdout, pd.ID, false),
}
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
go func() {
err = cmd.Wait()
if err != nil {
logger.Errorf(true, "Error occurred while waiting for plugin process to finish.\nError : %s", err.Error())
}
}()
if err != nil {
return nil, err
}
var port string
select {
case port = <-portChan:
close(portChan)
case <-time.After(config.PluginConnectionTimeout()):
return nil, fmt.Errorf("timed out connecting to %s", pd.ID)
}
logger.Debugf(true, "Attempting to connect to grpc server at port: %s", port)
gRPCConn, err := grpc.Dial(fmt.Sprintf("%s:%s", "127.0.0.1", port),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(1024*1024*1024), grpc.MaxCallRecvMsgSize(1024*1024*1024)),
grpc.WithBlock())
if err != nil {
return nil, err
}
plugin := &plugin{
pluginCmd: cmd,
descriptor: pd,
gRPCConn: gRPCConn,
mutex: &sync.Mutex{},
}
if pd.hasScope(docScope) {
plugin.DocumenterClient = gauge_messages.NewDocumenterClient(gRPCConn)
} else {
plugin.ReporterClient = gauge_messages.NewReporterClient(gRPCConn)
}
logger.Debugf(true, "Successfully made the connection with plugin with port: %s", port)
return plugin, nil
}
func startLegacyPlugin(pd *PluginDescriptor, command []string) (*plugin, error) {
writer := logger.NewLogWriter(pd.ID, true, 0)
cmd, err := common.ExecuteCommand(command, pd.pluginPath, writer.Stdout, writer.Stderr)
if err != nil {
return nil, err
}
var mutex = &sync.Mutex{}
go func() {
pState, _ := cmd.Process.Wait()
mutex.Lock()
cmd.ProcessState = pState
mutex.Unlock()
}()
plugin := &plugin{pluginCmd: cmd, descriptor: pd, mutex: mutex}
return plugin, nil
}
func SetEnvForPlugin(action pluginScope, pd *PluginDescriptor, m *manifest.Manifest, pluginEnvVars map[string]string) error {
pluginEnvVars[fmt.Sprintf("%s_action", pd.ID)] = string(action)
pluginEnvVars["test_language"] = m.Language
return setEnvironmentProperties(pluginEnvVars)
}
func setEnvironmentProperties(properties map[string]string) error {
for k, v := range properties {
if err := common.SetEnvVariable(k, v); err != nil {
return err
}
}
return nil
}
func IsPluginAdded(m *manifest.Manifest, descriptor *PluginDescriptor) bool {
for _, pluginID := range m.Plugins {
if pluginID == descriptor.ID {
return true
}
}
return false
}
func | (m *manifest.Manifest) (Handler, []string) {
var warnings []string
handler := &GaugePlugins{}
envProperties := make(map[string]string)
for _, pluginID := range m.Plugins {
pd, err := GetPluginDescriptor(pluginID, "")
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to start plugin %s. %s. To install, run `gauge install %s`.", pluginID, err.Error(), pluginID))
continue
}
compatibilityErr := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport)
if compatibilityErr != nil {
warnings = append(warnings, fmt.Sprintf("Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion))
continue
}
if pd.hasScope(executionScope) {
gaugeConnectionHandler, err := conn.NewGaugeConnectionHandler(0, nil)
if err != nil {
warnings = append(warnings, err.Error())
continue
}
envProperties[pluginConnectionPortEnv] = strconv.Itoa(gaugeConnectionHandler.ConnectionPortNumber())
prop, err := common.GetGaugeConfigurationFor(common.GaugePropertiesFile)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Unable to read Gauge configuration. %s", err.Error()))
continue
}
envProperties["plugin_kill_timeout"] = prop["plugin_kill_timeout"]
err = SetEnvForPlugin(executionScope, pd, m, envProperties)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error setting environment for plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
logger.Debugf(true, "Starting %s plugin", pd.Name)
plugin, err := startPlugin(pd, executionScope)
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. %s", pd.Name, pd.Version, err.Error()))
continue
}
if plugin.gRPCConn != nil {
handler.addPlugin(pluginID, plugin)
continue
}
pluginConnection, err := gaugeConnectionHandler.AcceptConnection(config.PluginConnectionTimeout(), make(chan error))
if err != nil {
warnings = append(warnings, fmt.Sprintf("Error starting plugin %s %s. Failed to connect to plugin. %s", pd.Name, pd.Version, err.Error()))
err := plugin.pluginCmd.Process.Kill()
if err != nil {
logger.Errorf(false, "unable to kill plugin %s: %s", plugin.descriptor.Name, err.Error())
}
continue
}
logger.Debugf(true, "Established connection to %s plugin", pd.Name)
plugin.connection = pluginConnection
handler.addPlugin(pluginID, plugin)
}
}
return handler, warnings
}
func GenerateDoc(pluginName string, specDirs []string, startAPIFunc func([]string) int) {
pd, err := GetPluginDescriptor(pluginName, "")
if err != nil {
logger.Fatalf(true, "Error starting plugin %s. Failed to get plugin.json. %s. To install, run `gauge install %s`.", pluginName, err.Error(), pluginName)
}
if err := version.CheckCompatibility(version.CurrentGaugeVersion, &pd.GaugeVersionSupport); err != nil {
logger.Fatalf(true, "Compatible %s plugin version to current Gauge version %s not found", pd.Name, version.CurrentGaugeVersion)
}
if !pd.hasScope(docScope) {
logger.Fatalf(true, "Invalid plugin name: %s, this plugin cannot generate documentation.", pd.Name)
}
var sources []string
for _, src := range specDirs {
path, _ := filepath.Abs(src)
sources = append(sources, path)
}
os.Setenv("GAUGE_SPEC_DIRS", strings.Join(sources, "||"))
os.Setenv("GAUGE_PROJECT_ROOT", config.ProjectRoot)
if pd.hasCapability(gRPCSupportCapability) {
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
_, err = p.DocumenterClient.GenerateDocs(context.Background(), getSpecDetails(specDirs))
grpcErr := p.killGrpcProcess()
if grpcErr != nil {
logger.Errorf(false, "Unable to kill plugin %s : %s", p.descriptor.Name, grpcErr.Error())
}
if err != nil {
logger.Fatalf(true, "Failed to generate docs. %s", err.Error())
}
} else {
port := startAPIFunc(specDirs)
err := os.Setenv(common.APIPortEnvVariableName, strconv.Itoa(port))
if err != nil {
logger.Fatalf(true, "Failed to set env GAUGE_API_PORT. %s", err.Error())
}
p, err := startPlugin(pd, docScope)
if err != nil {
logger.Fatalf(true, " %s %s. %s", pd.Name, pd.Version, err.Error())
}
for isProcessRunning(p) {
}
}
}
func (p *plugin) invokeService(m *gauge_messages.Message) error {
ctx := context.Background()
var err error
switch m.GetMessageType() {
case gauge_messages.Message_SuiteExecutionResult:
_, err = p.ReporterClient.NotifySuiteResult(ctx, m.GetSuiteExecutionResult())
case gauge_messages.Message_ExecutionStarting:
_, err = p.ReporterClient.NotifyExecutionStarting(ctx, m.GetExecutionStartingRequest())
case gauge_messages.Message_ExecutionEnding:
_, err = p.ReporterClient.NotifyExecutionEnding(ctx, m.GetExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionEnding:
_, err = p.ReporterClient.NotifySpecExecutionEnding(ctx, m.GetSpecExecutionEndingRequest())
case gauge_messages.Message_SpecExecutionStarting:
_, err = p.ReporterClient.NotifySpecExecutionStarting(ctx, m.GetSpecExecutionStartingRequest())
case gauge_messages.Message_ScenarioExecutionEnding:
_, err = p.ReporterClient.NotifyScenarioExecutionEnding(ctx, m.GetScenarioExecutionEndingRequest())
case gauge_messages.Message_ScenarioExecutionStarting:
_, err = p.ReporterClient.NotifyScenarioExecutionStarting(ctx, m.GetScenarioExecutionStartingRequest())
case gauge_messages.Message_StepExecutionEnding:
_, err = p.ReporterClient.NotifyStepExecutionEnding(ctx, m.GetStepExecutionEndingRequest())
case gauge_messages.Message_StepExecutionStarting:
_, err = p.ReporterClient.NotifyStepExecutionStarting(ctx, m.GetStepExecutionStartingRequest())
}
return err
}
func (p *plugin) sendMessage(message *gauge_messages.Message) error {
if p.gRPCConn != nil {
return p.invokeService(message)
}
messageID := common.GetUniqueID()
message.MessageId = messageID
messageBytes, err := proto.Marshal(message)
if err != nil {
return err
}
err = conn.Write(p.connection, messageBytes)
if err != nil {
return fmt.Errorf("[Warning] Failed to send message to plugin: %s %s", p.descriptor.ID, err.Error())
}
return nil
}
func StartPlugins(m *manifest.Manifest) Handler {
pluginHandler, warnings := startPluginsForExecution(m)
logger.HandleWarningMessages(true, warnings)
return pluginHandler
}
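// Example (sketch; the package name and surrounding wiring are assumptions,
// not taken from this file): a runner would typically start the
// execution-scoped plugins once per run and keep the returned handler to
// dispatch execution events to them later.
//
//	handler := plugin.StartPlugins(mani)
//	_ = handler // later used to forward gauge_messages to each started plugin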
func PluginsWithoutScope() (infos []pluginInfo.PluginInfo) {
if plugins, err := pluginInfo.GetAllInstalledPluginsWithVersion(); err == nil {
for _, p := range plugins {
pd, err := GetPluginDescriptor(p.Name, p.Version.String())
if err == nil && !pd.hasAnyScope() {
infos = append(infos, p)
}
}
}
return
}
// GetInstallDir returns the install directory of given plugin and a given version.
func GetInstallDir(pluginName, v string) (string, error) {
allPluginsInstallDir, err := common.GetPluginsInstallDir(pluginName)
if err != nil {
return "", err
}
pluginDir := filepath.Join(allPluginsInstallDir, pluginName)
if v != "" {
pluginDir = filepath.Join(pluginDir, v)
} else {
latestPlugin, err := pluginInfo.GetLatestInstalledPlugin(pluginDir)
if err != nil {
return "", err
}
pluginDir = latestPlugin.Path
}
return pluginDir, nil
}
func GetLanguageJSONFilePath(language string) (string, error) {
languageInstallDir, err := GetInstallDir(language, "")
if err != nil {
return "", err
}
languageJSON := filepath.Join(languageInstallDir, fmt.Sprintf("%s.json", language))
if !common.FileExists(languageJSON) {
return "", fmt.Errorf("Failed to find the implementation for: %s. %s does not exist.", language, languageJSON)
}
return languageJSON, nil
}
func IsLanguagePlugin(plugin string) bool {
if _, err := GetLanguageJSONFilePath(plugin); err != nil {
return false
}
return true
}
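// QueryParams assembles the metadata query string appended to outgoing
// requests (presumably plugin-repository or update lookups): project language,
// installed plugins, OS and architecture. With illustrative values it looks
// like "?l=java&p=html-report,screenshot&o=linux&a=amd64".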
func QueryParams() string {
return fmt.Sprintf("?l=%s&p=%s&o=%s&a=%s", language(), plugins(), runtime.GOOS, runtime.GOARCH)
}
func language() string {
if config.ProjectRoot == "" {
return ""
}
m, err := manifest.ProjectManifest()
if err != nil {
return ""
}
return m.Language
}
func plugins() string {
pluginInfos, err := pluginInfo.GetAllInstalledPluginsWithVersion()
if err != nil {
return ""
}
var plugins []string
for _, p := range pluginInfos {
plugins = append(plugins, p.Name)
}
return strings.Join(plugins, ",")
}
func getSpecDetails(specDirs []string) *gauge_messages.SpecDetails {
sig := &infoGatherer.SpecInfoGatherer{SpecDirs: specDirs}
sig.Init()
specDetails := make([]*gauge_messages.SpecDetails_SpecDetail, 0)
for _, d := range sig.GetAvailableSpecDetails(specDirs) {
detail := &gauge_messages.SpecDetails_SpecDetail{}
if d.HasSpec() {
detail.Spec = gauge.ConvertToProtoSpec(d.Spec)
}
for _, e := range d.Errs {
detail.ParseErrors = append(detail.ParseErrors, &gauge_messages.Error{Type: gauge_messages.Error_PARSE_ERROR, Filename: e.FileName, Message: e.Message, LineNumber: int32(e.LineNo)})
}
specDetails = append(specDetails, detail)
}
return &gauge_messages.SpecDetails{
Details: specDetails,
}
}
| startPluginsForExecution | identifier_name |
hkdf.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
pub use crate::prelude::*;
use crate::CryptoProvider;
use alloc::vec;
use alloc::vec::Vec;
use core::iter;
use core::marker::PhantomData;
use crypto_provider::hkdf::Hkdf;
use hex_literal::hex;
use rstest_reuse::template;
/// Generates the test cases to validate the hkdf implementation.
/// For example, to test `MyCryptoProvider`:
///
/// ```
/// mod tests {
/// use std::marker::PhantomData;
/// use crypto_provider::testing::CryptoProviderTestCase;
/// #[apply(hkdf_test_cases)]
/// fn hkdf_tests(testcase: CryptoProviderTestCase<MyCryptoProvider>){
/// testcase(PhantomData::<MyCryptoProvider>);
/// }
/// }
/// ```
#[template]
#[export]
#[rstest]
#[case::basic_test_hkdf(basic_test_hkdf)]
#[case::test_rfc5869_sha256(test_rfc5869_sha256)]
#[case::test_lengths(test_lengths)]
#[case::test_max_length(test_max_length)]
#[case::test_max_length_exceeded(test_max_length_exceeded)]
#[case::test_unsupported_length(test_unsupported_length)]
#[case::test_expand_multi_info(test_expand_multi_info)]
#[case::run_hkdf_sha256_vectors(run_hkdf_sha256_vectors)]
#[case::run_hkdf_sha512_vectors(run_hkdf_sha512_vectors)]
fn hkdf_test_cases<C: CryptoProvider>(#[case] testcase: CryptoProviderTestCase<C>) {}
const MAX_SHA256_LENGTH: usize = 255 * (256 / 8); // =8160
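// RFC 5869 caps HKDF output at 255 * HashLen bytes, so with SHA-256
// (HashLen = 32) the largest valid OKM is 8160 bytes; the length tests below
// probe exactly this boundary.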
/// A single HKDF test vector: input keying material, salt, context info, and the expected OKM.
pub struct Test<'a> {
ikm: &'a [u8],
salt: &'a [u8],
info: &'a [u8],
okm: &'a [u8],
}
/// Data taken from the sample code in the README on the crate's crates.io page.
pub fn basic_test_hkdf<C: CryptoProvider>(_: PhantomData<C>) {
let ikm = hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b");
let salt = hex!("000102030405060708090a0b0c");
let info = hex!("f0f1f2f3f4f5f6f7f8f9");
let hk = C::HkdfSha256::new(Some(&salt[..]), &ikm);
let mut okm = [0u8; 42];
hk.expand(&info, &mut okm)
.expect("42 is a valid length for Sha256 to output");
let expected = hex!(
"
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"
);
assert_eq!(okm, expected);
}
// Test Vectors from https://tools.ietf.org/html/rfc5869.
#[rustfmt::skip]
/// Checks the SHA-256 test vectors from RFC 5869.
pub fn test_rfc5869_sha256<C: CryptoProvider>(_: PhantomData<C>) {
let tests = [
Test {
// Test Case 1
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!("000102030405060708090a0b0c"),
info: &hex!("f0f1f2f3f4f5f6f7f8f9"),
okm: &hex!("
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"),
},
Test {
// Test Case 2
ikm: &hex!("
000102030405060708090a0b0c0d0e0f
101112131415161718191a1b1c1d1e1f
202122232425262728292a2b2c2d2e2f
303132333435363738393a3b3c3d3e3f
404142434445464748494a4b4c4d4e4f
"),
salt: &hex!("
606162636465666768696a6b6c6d6e6f
707172737475767778797a7b7c7d7e7f
808182838485868788898a8b8c8d8e8f
909192939495969798999a9b9c9d9e9f
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf
"),
info: &hex!("
b0b1b2b3b4b5b6b7b8b9babbbcbdbebf
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf
e0e1e2e3e4e5e6e7e8e9eaebecedeeef
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
"),
okm: &hex!("
b11e398dc80327a1c8e7f78c596a4934
4f012eda2d4efad8a050cc4c19afa97c
59045a99cac7827271cb41c65e590e09
da3275600c2f09b8367793a9aca3db71
cc30c58179ec3e87c14c01d5c1f3434f
1d87
"),
},
Test {
// Test Case 3
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!(""),
info: &hex!(""),
okm: &hex!("
8da4e775a563c18f715f802a063c5a31
b8a11f5c5ee1879ec3454e5f3c738d2d
9d201395faa4b61a96c8
"),
},
];
for Test { ikm, salt, info, okm } in tests.iter() {
let salt = if salt.is_empty() {
None
} else {
Some(&salt[..])
};
let hkdf = C::HkdfSha256::new(salt, ikm);
let mut okm2 = vec![0u8; okm.len()];
assert!(hkdf.expand(&info[..], &mut okm2).is_ok());
assert_eq!(okm2[..], okm[..]);
}
}
/// Exercises a spread of legal output lengths up to the SHA-256 maximum.
pub fn test_lengths<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(None, &[]);
let mut longest = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut longest).is_ok());
// Runtime is O(length), so exhaustively testing all legal lengths
// would take too long (at least without --release). Only test a
// subset: the first 500, the last 10, and every 100th in between.
// 0 is an invalid key length for openssl, so start at 1
let lengths = (1..MAX_SHA256_LENGTH + 1)
.filter(|&len| !(500..=MAX_SHA256_LENGTH - 10).contains(&len) || len % 100 == 0);
for length in lengths {
let mut okm = vec![0u8; length];
assert!(hkdf.expand(&[], &mut okm).is_ok());
assert_eq!(okm.len(), length);
assert_eq!(okm[..], longest[..length]);
}
}
/// Requests exactly the maximum SHA-256 output length.
pub fn test_max_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut okm).is_ok());
}
/// One byte beyond the maximum length must be rejected.
pub fn test_max_length_exceeded<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH + 1];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
/// A far larger request (90000 bytes) must also be rejected.
pub fn test_unsupported_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; 90000];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
/// expand_multi_info over split info slices must match a single expand over their concatenation.
pub fn test_expand_multi_info<C: CryptoProvider>(_: PhantomData<C>) {
let info_components = &[
&b"09090909090909090909090909090909090909090909"[..],
&b"8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a"[..],
&b"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0"[..],
&b"4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4"[..],
&b"1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d"[..],
];
let hkdf = C::HkdfSha256::new(None, b"some ikm here");
// Compute HKDF-Expand on the concatenation of all the info components
let mut oneshot_res = [0u8; 16];
hkdf.expand(&info_components.concat(), &mut oneshot_res)
.unwrap();
// Now iteratively join the components of info_components until it's all 1 component. The value
// of HKDF-Expand should be the same throughout
let mut num_concatted = 0;
let mut info_head = Vec::new();
while num_concatted < info_components.len() {
info_head.extend(info_components[num_concatted]);
// Build the new input to be the info head followed by the remaining components
let input: Vec<&[u8]> = iter::once(info_head.as_slice())
.chain(info_components.iter().cloned().skip(num_concatted + 1))
.collect();
// Compute and compare to the one-shot answer
let mut multipart_res = [0u8; 16];
hkdf.expand_multi_info(&input, &mut multipart_res).unwrap();
assert_eq!(multipart_res, oneshot_res);
num_concatted += 1;
}
}
/// Runs the Wycheproof HKDF-SHA256 test vectors.
pub fn run_hkdf_sha256_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha256>(HashAlg::Sha256)
}
/// Runs the Wycheproof HKDF-SHA512 test vectors.
pub fn run_hkdf_sha512_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha512>(HashAlg::Sha512)
}
enum HashAlg {
Sha256,
Sha512,
}
/// Dispatches to the Wycheproof test set for the given hash algorithm.
fn run_hkdf_test_vectors<K: Hkdf>(hash: HashAlg) {
let test_name = match hash {
HashAlg::Sha256 => wycheproof::hkdf::TestName::HkdfSha256,
HashAlg::Sha512 => wycheproof::hkdf::TestName::HkdfSha512,
};
let test_set =
wycheproof::hkdf::TestSet::load(test_name).expect("should be able to load test set");
for test_group in test_set.test_groups {
for test in test_group.tests {
let ikm = test.ikm;
let salt = test.salt;
let info = test.info;
let okm = test.okm;
let tc_id = test.tc_id;
if let Some(desc) = run_test::<K>(
ikm.as_slice(),
salt.as_slice(),
info.as_slice(),
okm.as_slice(),
) {
panic!(
"\n\
Failed test {tc_id}: {desc}\n\
ikm:\t{ikm:?}\n\
salt:\t{salt:?}\n\
info:\t{info:?}\n\
okm:\t{okm:?}\n"
);
}
}
}
}
fn run_test<K: Hkdf>(ikm: &[u8], salt: &[u8], info: &[u8], okm: &[u8]) -> Option<&'static str> {
let prk = K::new(Some(salt), ikm);
let mut got_okm = vec![0; okm.len()];
if prk.expand(info, &mut got_okm).is_err() {
return Some("prk expand");
}
if got_okm != okm {
return Some("mismatch in okm");
}
None
}
hkdf.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
pub use crate::prelude::*;
use crate::CryptoProvider;
use alloc::vec;
use alloc::vec::Vec;
use core::iter;
use core::marker::PhantomData;
use crypto_provider::hkdf::Hkdf;
use hex_literal::hex;
use rstest_reuse::template;
/// Generates the test cases to validate the hkdf implementation.
/// For example, to test `MyCryptoProvider`:
///
/// ```
/// mod tests {
/// use std::marker::PhantomData;
/// use crypto_provider::testing::CryptoProviderTestCase;
/// #[apply(hkdf_test_cases)]
/// fn hkdf_tests(testcase: CryptoProviderTestCase<MyCryptoProvider>){
/// testcase(PhantomData::<MyCryptoProvider>);
/// }
/// }
/// ```
#[template]
#[export]
#[rstest]
#[case::basic_test_hkdf(basic_test_hkdf)]
#[case::test_rfc5869_sha256(test_rfc5869_sha256)]
#[case::test_lengths(test_lengths)]
#[case::test_max_length(test_max_length)]
#[case::test_max_length_exceeded(test_max_length_exceeded)]
#[case::test_unsupported_length(test_unsupported_length)]
#[case::test_expand_multi_info(test_expand_multi_info)]
#[case::run_hkdf_sha256_vectors(run_hkdf_sha256_vectors)]
#[case::run_hkdf_sha512_vectors(run_hkdf_sha512_vectors)]
fn hkdf_test_cases<C: CryptoProvider>(#[case] testcase: CryptoProviderTestCase<C>) {}
const MAX_SHA256_LENGTH: usize = 255 * (256 / 8); // =8160
///
pub struct Test<'a> {
ikm: &'a [u8],
salt: &'a [u8],
info: &'a [u8],
okm: &'a [u8],
}
/// data taken from sample code in Readme of crates.io page
pub fn basic_test_hkdf<C: CryptoProvider>(_: PhantomData<C>) {
let ikm = hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b");
let salt = hex!("000102030405060708090a0b0c");
let info = hex!("f0f1f2f3f4f5f6f7f8f9");
let hk = C::HkdfSha256::new(Some(&salt[..]), &ikm);
let mut okm = [0u8; 42];
hk.expand(&info, &mut okm)
.expect("42 is a valid length for Sha256 to output");
let expected = hex!(
"
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"
);
assert_eq!(okm, expected);
}
// Test Vectors from https://tools.ietf.org/html/rfc5869.
#[rustfmt::skip]
///
pub fn test_rfc5869_sha256<C: CryptoProvider>(_: PhantomData<C>) {
let tests = [
Test {
// Test Case 1
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!("000102030405060708090a0b0c"),
info: &hex!("f0f1f2f3f4f5f6f7f8f9"),
okm: &hex!("
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"),
},
Test {
// Test Case 2
ikm: &hex!("
000102030405060708090a0b0c0d0e0f
101112131415161718191a1b1c1d1e1f
202122232425262728292a2b2c2d2e2f
303132333435363738393a3b3c3d3e3f
404142434445464748494a4b4c4d4e4f
"),
salt: &hex!("
606162636465666768696a6b6c6d6e6f
707172737475767778797a7b7c7d7e7f
808182838485868788898a8b8c8d8e8f
909192939495969798999a9b9c9d9e9f
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf
"),
info: &hex!("
b0b1b2b3b4b5b6b7b8b9babbbcbdbebf
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf
e0e1e2e3e4e5e6e7e8e9eaebecedeeef
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
"),
okm: &hex!("
b11e398dc80327a1c8e7f78c596a4934
4f012eda2d4efad8a050cc4c19afa97c
59045a99cac7827271cb41c65e590e09
da3275600c2f09b8367793a9aca3db71
cc30c58179ec3e87c14c01d5c1f3434f
1d87
"),
},
Test {
// Test Case 3
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!(""),
info: &hex!(""),
okm: &hex!("
8da4e775a563c18f715f802a063c5a31
b8a11f5c5ee1879ec3454e5f3c738d2d
9d201395faa4b61a96c8
"),
},
];
for Test { ikm, salt, info, okm } in tests.iter() {
let salt = if salt.is_empty() {
None
} else {
Some(&salt[..])
};
let hkdf = C::HkdfSha256::new(salt, ikm);
let mut okm2 = vec![0u8; okm.len()];
assert!(hkdf.expand(&info[..], &mut okm2).is_ok());
assert_eq!(okm2[..], okm[..]);
}
}
///
pub fn test_lengths<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(None, &[]);
let mut longest = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut longest).is_ok());
// Runtime is O(length), so exhaustively testing all legal lengths
// would take too long (at least without --release). Only test a
// subset: the first 500, the last 10, and every 100th in between.
// 0 is an invalid key length for openssl, so start at 1
let lengths = (1..MAX_SHA256_LENGTH + 1)
.filter(|&len| !(500..=MAX_SHA256_LENGTH - 10).contains(&len) || len % 100 == 0);
for length in lengths {
let mut okm = vec![0u8; length];
assert!(hkdf.expand(&[], &mut okm).is_ok());
assert_eq!(okm.len(), length);
assert_eq!(okm[..], longest[..length]);
}
}
///
pub fn test_max_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut okm).is_ok());
}
///
pub fn test_max_length_exceeded<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH + 1];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
///
pub fn test_unsupported_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; 90000];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
///
pub fn test_expand_multi_info<C: CryptoProvider>(_: PhantomData<C>) {
let info_components = &[
&b"09090909090909090909090909090909090909090909"[..],
&b"8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a"[..],
&b"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0"[..],
&b"4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4"[..],
&b"1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d"[..],
];
let hkdf = C::HkdfSha256::new(None, b"some ikm here");
// Compute HKDF-Expand on the concatenation of all the info components
let mut oneshot_res = [0u8; 16];
hkdf.expand(&info_components.concat(), &mut oneshot_res)
.unwrap();
// Now iteratively join the components of info_components until it's all 1 component. The value
// of HKDF-Expand should be the same throughout
let mut num_concatted = 0;
let mut info_head = Vec::new();
while num_concatted < info_components.len() {
info_head.extend(info_components[num_concatted]);
// Build the new input to be the info head followed by the remaining components
let input: Vec<&[u8]> = iter::once(info_head.as_slice())
.chain(info_components.iter().cloned().skip(num_concatted + 1))
.collect();
// Compute and compare to the one-shot answer
let mut multipart_res = [0u8; 16];
hkdf.expand_multi_info(&input, &mut multipart_res).unwrap();
assert_eq!(multipart_res, oneshot_res);
num_concatted += 1;
}
}
///
pub fn run_hkdf_sha256_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha256>(HashAlg::Sha256)
}
///
pub fn run_hkdf_sha512_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha512>(HashAlg::Sha512)
}
enum HashAlg {
Sha256,
Sha512,
}
///
fn run_hkdf_test_vectors<K: Hkdf>(hash: HashAlg) {
let test_name = match hash {
HashAlg::Sha256 => wycheproof::hkdf::TestName::HkdfSha256,
HashAlg::Sha512 => wycheproof::hkdf::TestName::HkdfSha512,
};
let test_set =
wycheproof::hkdf::TestSet::load(test_name).expect("should be able to load test set");
for test_group in test_set.test_groups {
for test in test_group.tests {
let ikm = test.ikm;
let salt = test.salt;
let info = test.info;
let okm = test.okm;
let tc_id = test.tc_id;
if let Some(desc) = run_test::<K>(
ikm.as_slice(),
salt.as_slice(),
info.as_slice(),
okm.as_slice(),
) {
panic!(
"\n\
Failed test {tc_id}: {desc}\n\
ikm:\t{ikm:?}\n\
salt:\t{salt:?}\n\
info:\t{info:?}\n\
okm:\t{okm:?}\n"
);
}
}
}
}
fn run_test<K: Hkdf>(ikm: &[u8], salt: &[u8], info: &[u8], okm: &[u8]) -> Option<&'static str> {
let prk = K::new(Some(salt), ikm);
let mut got_okm = vec![0; okm.len()];
if prk.expand(info, &mut got_okm).is_err() {
return Some("prk expand");
}
if got_okm != okm {
return Some("mismatch in okm");
}
None
}
hkdf.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
pub use crate::prelude::*;
use crate::CryptoProvider;
use alloc::vec;
use alloc::vec::Vec;
use core::iter;
use core::marker::PhantomData;
use crypto_provider::hkdf::Hkdf;
use hex_literal::hex;
use rstest_reuse::template;
/// Generates the test cases to validate the hkdf implementation.
/// For example, to test `MyCryptoProvider`:
///
/// ```
/// mod tests {
/// use std::marker::PhantomData;
/// use crypto_provider::testing::CryptoProviderTestCase;
/// #[apply(hkdf_test_cases)]
/// fn hkdf_tests(testcase: CryptoProviderTestCase<MyCryptoProvider>){
/// testcase(PhantomData::<MyCryptoProvider>);
/// }
/// }
/// ```
#[template]
#[export]
#[rstest]
#[case::basic_test_hkdf(basic_test_hkdf)]
#[case::test_rfc5869_sha256(test_rfc5869_sha256)]
#[case::test_lengths(test_lengths)]
#[case::test_max_length(test_max_length)]
#[case::test_max_length_exceeded(test_max_length_exceeded)]
#[case::test_unsupported_length(test_unsupported_length)]
#[case::test_expand_multi_info(test_expand_multi_info)]
#[case::run_hkdf_sha256_vectors(run_hkdf_sha256_vectors)]
#[case::run_hkdf_sha512_vectors(run_hkdf_sha512_vectors)]
fn hkdf_test_cases<C: CryptoProvider>(#[case] testcase: CryptoProviderTestCase<C>) {}
const MAX_SHA256_LENGTH: usize = 255 * (256 / 8); // =8160
///
pub struct Test<'a> {
ikm: &'a [u8],
salt: &'a [u8],
info: &'a [u8],
okm: &'a [u8],
}
/// data taken from sample code in Readme of crates.io page
pub fn basic_test_hkdf<C: CryptoProvider>(_: PhantomData<C>) {
let ikm = hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b");
let salt = hex!("000102030405060708090a0b0c");
let info = hex!("f0f1f2f3f4f5f6f7f8f9");
let hk = C::HkdfSha256::new(Some(&salt[..]), &ikm);
let mut okm = [0u8; 42];
hk.expand(&info, &mut okm)
.expect("42 is a valid length for Sha256 to output");
let expected = hex!(
"
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"
);
assert_eq!(okm, expected);
}
// Test Vectors from https://tools.ietf.org/html/rfc5869.
#[rustfmt::skip]
///
pub fn test_rfc5869_sha256<C: CryptoProvider>(_: PhantomData<C>) {
let tests = [
Test {
// Test Case 1
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!("000102030405060708090a0b0c"),
info: &hex!("f0f1f2f3f4f5f6f7f8f9"),
okm: &hex!("
3cb25f25faacd57a90434f64d0362f2a
2d2d0a90cf1a5a4c5db02d56ecc4c5bf
34007208d5b887185865
"),
},
Test {
// Test Case 2
ikm: &hex!("
000102030405060708090a0b0c0d0e0f
101112131415161718191a1b1c1d1e1f
202122232425262728292a2b2c2d2e2f
303132333435363738393a3b3c3d3e3f
404142434445464748494a4b4c4d4e4f
"),
salt: &hex!("
606162636465666768696a6b6c6d6e6f
707172737475767778797a7b7c7d7e7f
808182838485868788898a8b8c8d8e8f
909192939495969798999a9b9c9d9e9f
a0a1a2a3a4a5a6a7a8a9aaabacadaeaf
"),
info: &hex!("
b0b1b2b3b4b5b6b7b8b9babbbcbdbebf
c0c1c2c3c4c5c6c7c8c9cacbcccdcecf
d0d1d2d3d4d5d6d7d8d9dadbdcdddedf
e0e1e2e3e4e5e6e7e8e9eaebecedeeef
f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff
"),
okm: &hex!("
b11e398dc80327a1c8e7f78c596a4934
4f012eda2d4efad8a050cc4c19afa97c
59045a99cac7827271cb41c65e590e09
da3275600c2f09b8367793a9aca3db71
cc30c58179ec3e87c14c01d5c1f3434f
1d87
"),
},
Test {
// Test Case 3
ikm: &hex!("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b"),
salt: &hex!(""),
info: &hex!(""),
okm: &hex!("
8da4e775a563c18f715f802a063c5a31
b8a11f5c5ee1879ec3454e5f3c738d2d
9d201395faa4b61a96c8
"),
},
];
for Test { ikm, salt, info, okm } in tests.iter() {
let salt = if salt.is_empty() {
None
} else {
Some(&salt[..])
};
let hkdf = C::HkdfSha256::new(salt, ikm);
let mut okm2 = vec![0u8; okm.len()];
assert!(hkdf.expand(&info[..], &mut okm2).is_ok());
assert_eq!(okm2[..], okm[..]);
}
}
///
pub fn test_lengths<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(None, &[]);
let mut longest = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut longest).is_ok());
// Runtime is O(length), so exhaustively testing all legal lengths
// would take too long (at least without --release). Only test a
// subset: the first 500, the last 10, and every 100th in between.
// 0 is an invalid key length for openssl, so start at 1
let lengths = (1..MAX_SHA256_LENGTH + 1)
.filter(|&len| !(500..=MAX_SHA256_LENGTH - 10).contains(&len) || len % 100 == 0);
for length in lengths {
let mut okm = vec![0u8; length];
assert!(hkdf.expand(&[], &mut okm).is_ok());
assert_eq!(okm.len(), length);
assert_eq!(okm[..], longest[..length]);
}
}
///
pub fn test_max_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH];
assert!(hkdf.expand(&[], &mut okm).is_ok());
}
///
pub fn test_max_length_exceeded<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; MAX_SHA256_LENGTH + 1];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
///
pub fn test_unsupported_length<C: CryptoProvider>(_: PhantomData<C>) {
let hkdf = C::HkdfSha256::new(Some(&[]), &[]);
let mut okm = vec![0u8; 90000];
assert!(hkdf.expand(&[], &mut okm).is_err());
}
///
pub fn test_expand_multi_info<C: CryptoProvider>(_: PhantomData<C>) {
let info_components = &[
&b"09090909090909090909090909090909090909090909"[..],
&b"8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a8a"[..],
&b"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0"[..],
&b"4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4"[..],
&b"1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d"[..],
];
let hkdf = C::HkdfSha256::new(None, b"some ikm here");
// Compute HKDF-Expand on the concatenation of all the info components
let mut oneshot_res = [0u8; 16];
hkdf.expand(&info_components.concat(), &mut oneshot_res)
.unwrap();
// Now iteratively join the components of info_components until it's all 1 component. The value
// of HKDF-Expand should be the same throughout
let mut num_concatted = 0;
let mut info_head = Vec::new();
while num_concatted < info_components.len() {
info_head.extend(info_components[num_concatted]);
// Build the new input to be the info head followed by the remaining components
let input: Vec<&[u8]> = iter::once(info_head.as_slice())
.chain(info_components.iter().cloned().skip(num_concatted + 1))
.collect();
// Compute and compare to the one-shot answer
let mut multipart_res = [0u8; 16];
hkdf.expand_multi_info(&input, &mut multipart_res).unwrap();
assert_eq!(multipart_res, oneshot_res);
num_concatted += 1;
}
}
///
pub fn run_hkdf_sha256_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha256>(HashAlg::Sha256)
}
///
pub fn run_hkdf_sha512_vectors<C: CryptoProvider>(_: PhantomData<C>) {
run_hkdf_test_vectors::<C::HkdfSha512>(HashAlg::Sha512)
}
enum HashAlg {
Sha256,
Sha512,
}
///
fn run_hkdf_test_vectors<K: Hkdf>(hash: HashAlg) {
let test_name = match hash {
HashAlg::Sha256 => wycheproof::hkdf::TestName::HkdfSha256,
HashAlg::Sha512 => wycheproof::hkdf::TestName::HkdfSha512,
};
let test_set =
wycheproof::hkdf::TestSet::load(test_name).expect("should be able to load test set");
for test_group in test_set.test_groups {
for test in test_group.tests {
let ikm = test.ikm;
let salt = test.salt;
let info = test.info;
let okm = test.okm;
let tc_id = test.tc_id;
if let Some(desc) = run_test::<K>(
ikm.as_slice(),
salt.as_slice(),
info.as_slice(),
okm.as_slice(),
) {
panic!(
"\n\
Failed test {tc_id}: {desc}\n\
ikm:\t{ikm:?}\n\
salt:\t{salt:?}\n\
info:\t{info:?}\n\
okm:\t{okm:?}\n"
);
}
}
}
}
fn run_test<K: Hkdf>(ikm: &[u8], salt: &[u8], info: &[u8], okm: &[u8]) -> Option<&'static str> {
let prk = K::new(Some(salt), ikm);
let mut got_okm = vec![0; okm.len()];
if prk.expand(info, &mut got_okm).is_err() {
return Some("prk expand");
}
if got_okm != okm {
return Some("mismatch in okm");
}
None
}
msg.rs | use super::{nlmsg_length, nlmsg_header_length};
use std::fmt;
use std::mem::size_of;
use std::slice::from_raw_parts;
use std::io::{self, ErrorKind, Cursor};
use byteorder::{NativeEndian, ReadBytesExt};
#[derive(Clone, Copy, Debug)]
pub enum MsgType {
/// Request
Request,
/// No op
Noop,
/// Error
Error,
/// End of a dump
Done,
/// Data lost
Overrun,
/// minimum type, below 10 for reserved control messages
MinType,
/// User defined type, passed to the user
UserDefined(u16),
}
impl Into<u16> for MsgType {
fn into(self) -> u16 {
use self::MsgType::*;
match self {
Request => 0,
Noop => 1,
Error => 2,
Done => 3,
Overrun => 4,
MinType => 10,
UserDefined(i) => i,
}
}
}
impl From<u16> for MsgType {
fn from(t: u16) -> MsgType {
use self::MsgType::*;
match t {
0 => Request,
1 => Noop,
2 => Error,
3 => Done,
4 => Overrun,
10 => MinType,
i => UserDefined(i),
}
}
}
#[derive(Clone, Copy, Debug)]
enum Flags {
/// It is request message.
Request,
/// Multipart message, terminated by NLMSG_DONE
Multi,
/// Reply with ack, with zero or error code
Ack,
/// Echo this request
Echo,
}
impl Into<u16> for Flags {
fn into(self) -> u16 {
use self::Flags::*;
match self {
Request => 1,
Multi => 2,
Ack => 4,
Echo => 8,
}
}
}
/// Modifiers to GET request
#[derive(Clone, Copy, Debug)]
enum GetFlags {
/// specify tree root
Root,
/// return all matching
Match,
/// atomic GET
Atomic,
/// (Root|Match)
Dump,
}
impl Into<u16> for GetFlags {
fn into(self) -> u16 {
use self::GetFlags::*;
match self {
Root => 0x100,
Match => 0x200,
Atomic => 0x400,
Dump => 0x100 | 0x200,
}
}
}
/// Modifiers to NEW request
#[derive(Clone, Copy, Debug)]
enum NewFlags {
/// Override existing
Replace,
/// Do not touch, if it exists
Excl,
/// Create, if it does not exist
Create,
/// Add to end of list
Append,
}
impl Into<u16> for NewFlags {
fn into(self) -> u16 {
use self::NewFlags::*;
match self {
Replace => 0x100,
Excl => 0x200,
Create => 0x400,
Append => 0x800,
}
}
}
// HEADER FORMAT
// __u32 nlmsg_len; /* Length of message including header. */
// __u16 nlmsg_type; /* Type of message content. */
// __u16 nlmsg_flags; /* Additional flags. */
// __u32 nlmsg_seq; /* Sequence number. */
// __u32 nlmsg_pid; /* Sender port ID. */
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct NlMsgHeader {
msg_length: u32,
nl_type: u16,
flags: u16,
seq: u32,
pid: u32,
}
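// Wire-layout note: the five fields above form the fixed 16-byte netlink
// header (4 + 2 + 2 + 4 + 4 bytes, native endian). As exercised by
// `test_encoding` below, NlMsgHeader::request().data_length(4).pid(9).seq(1).dump()
// serializes to [20,0,0,0, 0,0, 1,3, 1,0,0,0, 9,0,0,0] on a little-endian host:
// len = 20, type = 0 (request), flags = 0x0301 (REQUEST | ROOT | MATCH),
// seq = 1, pid = 9.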
impl fmt::Debug for NlMsgHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f,
"<NlMsgHeader len={} {:?} flags=[ ",
self.msg_length,
MsgType::from(self.nl_type)));
// output readable flags
if self.flags & 1 != 0 {
try!(write!(f, "Request "));
}
if self.flags & 2 != 0 {
try!(write!(f, "Multi "));
}
if self.flags & 4 != 0 {
try!(write!(f, "Ack "));
}
if self.flags & 8 != 0 {
try!(write!(f, "Echo "));
}
if self.flags >> 4 != 0 {
try!(write!(f, "other({:#X})", self.flags));
}
try!(write!(f, "] seq={} pid={}>", self.seq, self.pid));
Ok(())
}
}
impl NlMsgHeader {
pub fn user_defined(t: u16, data_length: u32) -> NlMsgHeader {
let mut h = NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: t,
flags: Flags::Request.into(),
seq: 0,
pid: 0,
};
h.data_length(data_length);
h
}
pub fn request() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Request.into(),
flags: Flags::Request.into(),
seq: 0,
pid: 0,
}
}
pub fn done() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Done.into(),
flags: Flags::Multi.into(),
seq: 0,
pid: 0,
}
}
pub fn error() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_length(nlmsg_header_length() + 4) as u32, // nlmsgerr
nl_type: MsgType::Error.into(),
flags: 0,
seq: 0,
pid: 0,
}
}
pub fn from_bytes(bytes: &[u8]) -> io::Result<(NlMsgHeader, usize)> {
let mut cursor = Cursor::new(bytes);
let len = try!(cursor.read_u32::<NativeEndian>());
let nl_type = try!(cursor.read_u16::<NativeEndian>());
let flags = try!(cursor.read_u16::<NativeEndian>());
let seq = try!(cursor.read_u32::<NativeEndian>());
let pid = try!(cursor.read_u32::<NativeEndian>());
if len < nlmsg_header_length() as u32 {
Err(io::Error::new(ErrorKind::InvalidInput, "length smaller than msg header size"))
} else {
Ok((NlMsgHeader{
msg_length: len,
nl_type: nl_type,
flags: flags,
seq: seq,
pid: pid,
}, cursor.position() as usize))
}
}
pub fn bytes(&self) -> &[u8] {
let size = size_of::<NlMsgHeader>();
unsafe {
let head = self as *const NlMsgHeader as *const u8;
from_raw_parts(head, size)
}
}
pub fn msg_type(&self) -> MsgType {
self.nl_type.into()
}
pub fn msg_length(&self) -> u32 {
self.msg_length
}
/// Set message length
pub fn data_length(&mut self, len: u32) -> &mut NlMsgHeader {
self.msg_length = nlmsg_length(len as usize) as u32;
self
}
/// Multipart message
pub fn multipart(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Multi.into();
self
}
/// Request acknowledgement
pub fn ack(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Ack.into();
self
}
/// Echo message
pub fn echo(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Echo.into();
self
}
/// Set sequence number
pub fn seq(&mut self, n: u32) -> &mut NlMsgHeader {
self.seq = n;
self
}
/// Set PID number
pub fn pid(&mut self, n: u32) -> &mut NlMsgHeader {
self.pid = n;
self
}
/// Override existing
pub fn replace(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Replace.into();
self
}
/// Do not touch, if it exists
pub fn excl(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Excl.into();
self
}
/// Create, if it does not exist
pub fn create(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Create.into();
self
}
/// Add to end of list
pub fn append(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Append.into();
self
}
/// specify tree root
pub fn root(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Root.into();
self
}
/// return all matching
pub fn match_provided(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Match.into();
self
}
/// atomic GET
pub fn atomic(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Atomic.into();
self
}
/// (Root|Match)
pub fn dump(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Dump.into();
self
}
}
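// Example (sketch, mirroring the unit tests at the bottom of this file):
// build a dump request and grab its wire bytes.
//
// let mut hdr = NlMsgHeader::request();
// let bytes = hdr.data_length(4).pid(9).seq(1).dump().bytes();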
/*
http://linux.die.net/include/linux/netlink.h
/* Flags values */
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F_ROOT 0x100 /* specify tree root */
#define NLM_F_MATCH 0x200 /* return all matching */
#define NLM_F_ATOMIC 0x400 /* atomic GET */
#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH)
/* Modifiers to NEW request */
#define NLM_F_REPLACE 0x100 /* Override existing */
#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
/*
4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
4.4BSD CHANGE NLM_F_REPLACE
True CHANGE NLM_F_CREATE|NLM_F_REPLACE
Append NLM_F_CREATE
Check NLM_F_EXCL
*/
#define NLMSG_NOOP 0x1 /* Nothing. */
#define NLMSG_ERROR 0x2 /* Error */
#define NLMSG_DONE 0x3 /* End of a dump */
#define NLMSG_OVERRUN 0x4 /* Data lost */
#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
*/
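// Quick sanity check against the constants above: a GET issued with
// NLM_F_REQUEST | NLM_F_DUMP carries flags 0x1 | 0x300 = 0x301, which is the
// [1, 3] little-endian byte pair asserted in `test_encoding` below.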
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_encoding() {
// Little endian only right now
let expected = [20, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 9, 0, 0, 0];
let mut hdr = NlMsgHeader::request();
let bytes = hdr.data_length(4).pid(9).seq(1).dump().bytes();
assert_eq!(bytes, expected);
}
#[test]
fn test_decoding() {
// Little endian only right now
let bytes = [16, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 9, 0, 0, 0, 1, 1, 1];
let mut h = NlMsgHeader::request();
let expected = h.data_length(0).pid(9).seq(1).dump();
let (hdr, n) = NlMsgHeader::from_bytes(&bytes).unwrap();
assert_eq!(hdr, *expected);
assert_eq!(n, 16);
}
#[test]
fn test_decoding_error() {
// Little endian only right now
let bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let res = NlMsgHeader::from_bytes(&bytes);
assert!(res.is_err());
}
}
msg.rs | use super::{nlmsg_length, nlmsg_header_length};
use std::fmt;
use std::mem::size_of;
use std::slice::from_raw_parts;
use std::io::{self, ErrorKind, Cursor};
use byteorder::{NativeEndian, ReadBytesExt};
#[derive(Clone, Copy, Debug)]
pub enum MsgType {
/// Request
Request,
/// No op
Noop,
/// Error
Error,
/// End of a dump
Done,
/// Data lost
Overrun,
/// minimum type, below 10 for reserved control messages
MinType,
/// User defined type, passed to the user
UserDefined(u16),
}
impl Into<u16> for MsgType {
fn into(self) -> u16 {
use self::MsgType::*;
match self {
Request => 0,
Noop => 1,
Error => 2,
Done => 3,
Overrun => 4,
MinType => 10,
UserDefined(i) => i,
}
}
}
impl From<u16> for MsgType {
fn from(t: u16) -> MsgType {
use self::MsgType::*;
match t {
0 => Request,
1 => Noop,
2 => Error,
3 => Done,
4 => Overrun,
10 => MinType,
i => UserDefined(i),
}
}
}
#[derive(Clone, Copy, Debug)]
enum Flags {
/// It is request message.
Request,
/// Multipart message, terminated by NLMSG_DONE
Multi,
/// Reply with ack, with zero or error code
Ack,
/// Echo this request
Echo,
}
impl Into<u16> for Flags {
fn into(self) -> u16 {
use self::Flags::*;
match self {
Request => 1,
Multi => 2,
Ack => 4,
Echo => 8,
}
}
}
/// Modifiers to GET request
#[derive(Clone, Copy, Debug)]
enum GetFlags {
/// specify tree root
Root,
/// return all matching
Match,
/// atomic GET
Atomic,
/// (Root|Match)
Dump,
}
impl Into<u16> for GetFlags {
fn into(self) -> u16 {
use self::GetFlags::*;
match self {
Root => 0x100,
Match => 0x200,
Atomic => 0x400,
Dump => 0x100 | 0x200,
}
}
}
/// Modifiers to NEW request
#[derive(Clone, Copy, Debug)]
enum NewFlags {
/// Override existing
Replace,
/// Do not touch, if it exists
Excl,
/// Create, if it does not exist
Create,
/// Add to end of list
Append,
}
impl Into<u16> for NewFlags {
fn into(self) -> u16 {
use self::NewFlags::*;
match self {
Replace => 0x100,
Excl => 0x200,
Create => 0x400,
Append => 0x800,
}
}
}
// HEADER FORMAT
// __u32 nlmsg_len; /* Length of message including header. */
// __u16 nlmsg_type; /* Type of message content. */
// __u16 nlmsg_flags; /* Additional flags. */
// __u32 nlmsg_seq; /* Sequence number. */
// __u32 nlmsg_pid; /* Sender port ID. */
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct NlMsgHeader {
msg_length: u32,
nl_type: u16,
flags: u16,
seq: u32,
pid: u32,
}
impl fmt::Debug for NlMsgHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f,
"<NlMsgHeader len={} {:?} flags=[ ",
self.msg_length,
MsgType::from(self.nl_type)));
// output readable flags
if self.flags & 1 != 0 {
try!(write!(f, "Request "));
}
if self.flags & 2 != 0 {
try!(write!(f, "Multi "));
}
if self.flags & 4 != 0 {
try!(write!(f, "Ack "));
}
if self.flags & 8 != 0 {
try!(write!(f, "Echo "));
}
if self.flags >> 4 != 0 {
try!(write!(f, "other({:#X})", self.flags));
}
try!(write!(f, "] seq={} pid={}>", self.seq, self.pid));
Ok(())
}
}
impl NlMsgHeader {
pub fn user_defined(t: u16, data_length: u32) -> NlMsgHeader {
let mut h = NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: t,
flags: Flags::Request.into(),
seq: 0,
pid: 0,
};
h.data_length(data_length);
h
}
pub fn request() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Request.into(),
flags: Flags::Request.into(),
seq: 0,
pid: 0,
}
}
pub fn done() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Done.into(),
flags: Flags::Multi.into(),
seq: 0,
pid: 0,
}
}
pub fn error() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_length(nlmsg_header_length() + 4) as u32, // nlmsgerr
nl_type: MsgType::Error.into(),
flags: 0,
seq: 0,
pid: 0,
}
}
pub fn from_bytes(bytes: &[u8]) -> io::Result<(NlMsgHeader, usize)> {
let mut cursor = Cursor::new(bytes);
let len = try!(cursor.read_u32::<NativeEndian>());
let nl_type = try!(cursor.read_u16::<NativeEndian>());
let flags = try!(cursor.read_u16::<NativeEndian>());
let seq = try!(cursor.read_u32::<NativeEndian>());
let pid = try!(cursor.read_u32::<NativeEndian>());
if len < nlmsg_header_length() as u32 {
Err(io::Error::new(ErrorKind::InvalidInput, "length smaller than msg header size"))
} else {
Ok((NlMsgHeader{
msg_length: len,
nl_type: nl_type,
flags: flags,
seq: seq,
pid: pid,
}, cursor.position() as usize))
}
}
pub fn bytes(&self) -> &[u8] {
let size = size_of::<NlMsgHeader>();
unsafe {
let head = self as *const NlMsgHeader as *const u8;
from_raw_parts(head, size)
}
}
pub fn msg_type(&self) -> MsgType {
self.nl_type.into()
}
pub fn msg_length(&self) -> u32 {
self.msg_length
}
/// Set message length
pub fn data_length(&mut self, len: u32) -> &mut NlMsgHeader {
self.msg_length = nlmsg_length(len as usize) as u32;
self
}
/// Multipart message
pub fn multipart(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Multi.into();
self
}
/// Request acknowledgement
pub fn ack(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Ack.into();
self
}
/// Echo message
pub fn echo(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Echo.into();
self
}
/// Set sequence number
pub fn seq(&mut self, n: u32) -> &mut NlMsgHeader {
self.seq = n;
self
}
/// Set PID number
pub fn pid(&mut self, n: u32) -> &mut NlMsgHeader {
self.pid = n;
self
}
/// Override existing
pub fn replace(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Replace.into();
self
}
/// Do not touch, if it exists
pub fn excl(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Excl.into();
self
}
/// Create, if it does not exist
pub fn create(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Create.into();
self
}
/// Add to end of list
pub fn append(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Append.into();
self
}
/// specify tree root
pub fn root(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Root.into();
self
}
/// return all matching
pub fn match_provided(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Match.into();
self
}
/// atomic GET
pub fn atomic(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Atomic.into();
self
}
/// (Root|Match)
pub fn dump(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Dump.into();
self
}
}
/*
http://linux.die.net/include/linux/netlink.h
/* Flags values */
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F_ROOT 0x100 /* specify tree root */
#define NLM_F_MATCH 0x200 /* return all matching */
#define NLM_F_ATOMIC 0x400 /* atomic GET */
#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH)
/* Modifiers to NEW request */
#define NLM_F_REPLACE 0x100 /* Override existing */
#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
/*
4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
4.4BSD CHANGE NLM_F_REPLACE
True CHANGE NLM_F_CREATE|NLM_F_REPLACE
Append NLM_F_CREATE
Check NLM_F_EXCL
*/
#define NLMSG_NOOP 0x1 /* Nothing. */
#define NLMSG_ERROR 0x2 /* Error */
#define NLMSG_DONE 0x3 /* End of a dump */
#define NLMSG_OVERRUN 0x4 /* Data lost */
#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_encoding() {
// Little endian only right now
let expected = [20, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 9, 0, 0, 0];
let mut hdr = NlMsgHeader::request();
let bytes = hdr.data_length(4).pid(9).seq(1).dump().bytes();
assert_eq!(bytes, expected);
}
#[test]
fn test_decoding() {
// Little endian only right now
let bytes = [16, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 9, 0, 0, 0, 1, 1, 1];
let mut h = NlMsgHeader::request();
let expected = h.data_length(0).pid(9).seq(1).dump();
let (hdr, n) = NlMsgHeader::from_bytes(&bytes).unwrap();
assert_eq!(hdr, *expected);
assert_eq!(n, 16);
}
#[test]
fn test_decoding_error() {
// Little endian only right now
let bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let res = NlMsgHeader::from_bytes(&bytes);
assert!(res.is_err());
}
}
msg.rs | use super::{nlmsg_length, nlmsg_header_length};
use std::fmt;
use std::mem::size_of;
use std::slice::from_raw_parts;
use std::io::{self, ErrorKind, Cursor};
use byteorder::{NativeEndian, ReadBytesExt};
#[derive(Clone, Copy, Debug)]
pub enum MsgType {
/// Request
Request,
/// No op
Noop,
/// Error
Error,
/// End of a dump
Done,
/// Data lost
Overrun,
/// minimum type, below 10 for reserved control messages
MinType,
/// User defined type, passed to the user
UserDefined(u16),
}
impl Into<u16> for MsgType {
fn into(self) -> u16 {
use self::MsgType::*;
match self {
Request => 0,
Noop => 1,
Error => 2,
Done => 3,
Overrun => 4,
MinType => 10,
UserDefined(i) => i,
}
}
}
impl From<u16> for MsgType {
fn from(t: u16) -> MsgType {
use self::MsgType::*;
match t {
0 => Request,
1 => Noop,
2 => Error,
3 => Done,
4 => Overrun,
10 => MinType,
i => UserDefined(i),
}
}
}
#[derive(Clone, Copy, Debug)]
enum Flags {
/// It is request message.
Request,
/// Multipart message, terminated by NLMSG_DONE
Multi,
/// Reply with ack, with zero or error code
Ack,
/// Echo this request
Echo,
}
impl Into<u16> for Flags {
fn into(self) -> u16 {
use self::Flags::*;
match self {
Request => 1,
Multi => 2,
Ack => 4,
Echo => 8,
}
}
}
/// Modifiers to GET request
#[derive(Clone, Copy, Debug)]
enum GetFlags {
/// specify tree root
Root,
/// return all matching
Match,
/// atomic GET
Atomic,
/// (Root|Match)
Dump,
}
impl Into<u16> for GetFlags {
fn into(self) -> u16 {
use self::GetFlags::*;
match self {
Root => 0x100,
Match => 0x200,
Atomic => 0x400,
Dump => 0x100 | 0x200,
}
}
}
/// Modifiers to NEW request
#[derive(Clone, Copy, Debug)]
enum NewFlags {
/// Override existing
Replace,
/// Do not touch, if it exists
Excl,
/// Create, if it does not exist
Create,
/// Add to end of list
Append,
}
impl Into<u16> for NewFlags {
fn into(self) -> u16 {
use self::NewFlags::*;
match self {
Replace => 0x100,
Excl => 0x200,
Create => 0x400,
Append => 0x800,
}
}
}
// HEADER FORMAT
// __u32 nlmsg_len; /* Length of message including header. */
// __u16 nlmsg_type; /* Type of message content. */
// __u16 nlmsg_flags; /* Additional flags. */
// __u32 nlmsg_seq; /* Sequence number. */
// __u32 nlmsg_pid; /* Sender port ID. */
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq)]
pub struct NlMsgHeader {
msg_length: u32,
nl_type: u16,
flags: u16,
seq: u32,
pid: u32,
}
impl fmt::Debug for NlMsgHeader {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f,
"<NlMsgHeader len={} {:?} flags=[ ",
self.msg_length,
MsgType::from(self.nl_type)));
// output readable flags
if self.flags & 1 != 0 {
try!(write!(f, "Request "));
}
if self.flags & 2 != 0 {
try!(write!(f, "Multi "));
}
if self.flags & 4 != 0 {
try!(write!(f, "Ack "));
}
if self.flags & 8 != 0 {
try!(write!(f, "Echo "));
}
if self.flags >> 4 != 0 {
try!(write!(f, "other({:#X})", self.flags));
}
try!(write!(f, "] seq={} pid={}>", self.seq, self.pid));
Ok(())
}
}
impl NlMsgHeader {
pub fn user_defined(t: u16, data_length: u32) -> NlMsgHeader {
let mut h = NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: t,
flags: Flags::Request.into(),
seq: 0,
pid: 0,
};
h.data_length(data_length);
h
}
pub fn request() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Request.into(),
flags: Flags::Request.into(),
seq: 0,
pid: 0,
}
}
pub fn done() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_header_length() as u32,
nl_type: MsgType::Done.into(),
flags: Flags::Multi.into(),
seq: 0,
pid: 0,
}
}
pub fn error() -> NlMsgHeader {
NlMsgHeader {
msg_length: nlmsg_length(nlmsg_header_length() + 4) as u32, // nlmsgerr
nl_type: MsgType::Error.into(),
flags: 0,
seq: 0,
pid: 0,
}
}
pub fn from_bytes(bytes: &[u8]) -> io::Result<(NlMsgHeader, usize)> {
let mut cursor = Cursor::new(bytes);
let len = try!(cursor.read_u32::<NativeEndian>());
let nl_type = try!(cursor.read_u16::<NativeEndian>());
let flags = try!(cursor.read_u16::<NativeEndian>());
let seq = try!(cursor.read_u32::<NativeEndian>());
let pid = try!(cursor.read_u32::<NativeEndian>());
if len < nlmsg_header_length() as u32 {
Err(io::Error::new(ErrorKind::InvalidInput, "length smaller than msg header size"))
} else {
Ok((NlMsgHeader{
msg_length: len,
nl_type: nl_type,
flags: flags,
seq: seq,
pid: pid,
}, cursor.position() as usize))
}
}
pub fn bytes(&self) -> &[u8] {
let size = size_of::<NlMsgHeader>();
unsafe {
let head = self as *const NlMsgHeader as *const u8;
from_raw_parts(head, size)
}
}
pub fn msg_type(&self) -> MsgType {
self.nl_type.into()
}
pub fn msg_length(&self) -> u32 {
self.msg_length
}
/// Set message length
pub fn data_length(&mut self, len: u32) -> &mut NlMsgHeader {
self.msg_length = nlmsg_length(len as usize) as u32;
self
}
/// Multipart message
pub fn multipart(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Multi.into();
self
}
/// Request acknowledgement
pub fn ack(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Ack.into();
self
}
/// Echo message
pub fn echo(&mut self) -> &mut NlMsgHeader {
self.flags |= Flags::Echo.into();
self
}
/// Set sequence number
pub fn seq(&mut self, n: u32) -> &mut NlMsgHeader {
self.seq = n;
self
}
/// Set PID number
pub fn pid(&mut self, n: u32) -> &mut NlMsgHeader {
self.pid = n;
self
}
/// Override existing
pub fn replace(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Replace.into();
self
}
/// Do not touch, if it exists
pub fn excl(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Excl.into();
self
}
/// Create, if it does not exist
pub fn create(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Create.into();
self
}
/// Add to end of list
pub fn append(&mut self) -> &mut NlMsgHeader {
self.flags |= NewFlags::Append.into();
self
}
/// specify tree root
pub fn root(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Root.into();
self
}
/// return all matching
pub fn match_provided(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Match.into();
self
}
/// atomic GET
pub fn atomic(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Atomic.into();
self
}
/// (Root|Match)
pub fn dump(&mut self) -> &mut NlMsgHeader {
self.flags |= GetFlags::Dump.into();
self
}
}
/*
http://linux.die.net/include/linux/netlink.h
/* Flags values */
#define NLM_F_REQUEST 1 /* It is request message. */
#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 8 /* Echo this request */
/* Modifiers to GET request */
#define NLM_F_ROOT 0x100 /* specify tree root */
#define NLM_F_MATCH 0x200 /* return all matching */
#define NLM_F_ATOMIC 0x400 /* atomic GET */
#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH)
/* Modifiers to NEW request */
#define NLM_F_REPLACE 0x100 /* Override existing */
#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
/*
4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
4.4BSD CHANGE NLM_F_REPLACE
True CHANGE NLM_F_CREATE|NLM_F_REPLACE
Append NLM_F_CREATE
Check NLM_F_EXCL
*/
#define NLMSG_NOOP 0x1 /* Nothing. */
#define NLMSG_ERROR 0x2 /* Error */
#define NLMSG_DONE 0x3 /* End of a dump */
#define NLMSG_OVERRUN 0x4 /* Data lost */
#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_encoding() {
// Little endian only right now
let expected = [20, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 9, 0, 0, 0];
let mut hdr = NlMsgHeader::request();
let bytes = hdr.data_length(4).pid(9).seq(1).dump().bytes();
assert_eq!(bytes, expected);
}
#[test]
fn test_decoding() {
// Little endian only right now
let bytes = [16, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 9, 0, 0, 0, 1, 1, 1];
let mut h = NlMsgHeader::request();
let expected = h.data_length(0).pid(9).seq(1).dump();
let (hdr, n) = NlMsgHeader::from_bytes(&bytes).unwrap();
assert_eq!(hdr, *expected);
assert_eq!(n, 16);
}
#[test]
fn test_decoding_error() {
// Little endian only right now
let bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let res = NlMsgHeader::from_bytes(&bytes);
assert!(res.is_err());
}
}
model_infer.py | # -*- coding:utf-8 -*-
from collections import namedtuple
import os
import tensorflow as tf
import config
from model.inits import glorot, zeros
import model.layers as layers
from model.aggregators import CrossAggregator
flags = tf.app.flags
FLAGS = flags.FLAGS
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
sess_config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
sess_config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
sess_config.allow_soft_placement = True
SAGEInfo = namedtuple("SAGEInfo",
['layer_name', # name of the layer (to get feature embedding etc.)
# 'neigh_sampler', # callable neigh_sampler constructor
'num_samples',
'output_dim' # the output (i.e., hidden) dimension
])
layer_infos = [SAGEInfo("node", FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", FLAGS.samples_2, FLAGS.dim_2)]
# === Parameters fixed at inference time ===
num_classes = 2 # label_map.shape[1]
feats_dim = 106 #features.shape[1]
aggregator_type = 'cross'
concat = True
model_size = FLAGS.model_size
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
"""Implementation | of supervised GraphSAGE."""
def __init__(self, **kwargs):
# === from model.py ===
allowed_kwargs = {'name', 'logging', 'model_size'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
# logging = kwargs.get('logging', False)
# self.logging = logging
self.vars = {}
# self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
# === set aggregator ===
# Two extra aggregator types were added here: cross and cross geniepath
if aggregator_type == 'cross':
self.aggregator_cls = CrossAggregator
else:
raise Exception("Unknown aggregator: ", aggregator_type)
self.input_dim = feats_dim
self.output_dim = num_classes # 2
# self.sampler = sampler
# self.adj_info = adj
self.layer_infos = layer_infos
self.concat = concat
self.model_size = model_size
self.sigmoid_loss = sigmoid_loss
self.dims = [(self.input_dim) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])  # [feats_dim + identity_dim, dim_1, dim_2]
self.aggregator_type = aggregator_type
# === get info from placeholders ===
# get info from placeholders...
self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
# self.labels = self.placeholders['labels']
# self.batch_nodes = placeholders["batch_nodes"]
self.batch_size = self.placeholders["batch_size"]
# self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
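# NOTE: the caller passes (self.input_dim, self.output_dim), so these parameter names are
# effectively swapped; both are only referenced by the commented-out placeholders below.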
# Define placeholders
# these keys are used by the model during init
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
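# support_sizes accumulates the sampled-node count per hop, innermost layer first,
# e.g. [1, 8, 8*16] for two layers with samples_2=8 and samples_1=16 (per the comments above).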
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# gradient clipping not applied
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # take feats by index
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # look up feats by index
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # look up feats by index
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # added
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # aggregator.__init__() is invoked here # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num_samples[len(num_samples)- hop -1],
1]
# h = aggregator((hidden[hop],
# tf.reshape(hidden[hop + 1], neigh_dims)))
# call aggregator
# self_vecs, neigh_vecs, neigh_weight, neigh_column
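# Neighbour tensors are reshaped to [nodes_at_this_hop, num_sampled_neighbours, feature_dim]
# (weights/columns use a trailing dim of 1) before being handed to the aggregator.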
h = aggregator((
feats_hidden[hop],
tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
next_hidden.append(h)
feats_hidden = next_hidden
#self.hiddenOutput.append(hidden[0])
return feats_hidden[0], aggregators
# def _loss(self):
# # Weight decay loss
# for aggregator in self.aggregators:
# for var in aggregator.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# for var in self.node_pred.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# # classification loss
# if self.sigmoid_loss:
# self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# logits=self.node_preds,
# labels=self.labels))
# else:
# # switched to the _v2 op
# self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=self.node_preds,
# labels=self.labels))
# # tf.summary.scalar('loss', self.loss)
def _predict(self):
if self.sigmoid_loss:
self.preds = tf.nn.sigmoid(self.node_preds)
else:
self.preds = tf.nn.softmax(self.node_preds)
# === end of the computation-graph definition ===
def predict(self, feed_dict):
preds = self.sess.run([self.preds],
feed_dict=feed_dict)
return preds
def close_sess(self):
self.sess.close()
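# Example usage (sketch; `feats0`, `w0`, `c0`, ... are hypothetical arrays produced by an
# external neighbour sampler and must match the shapes expected by aggregate()):
# model = SupervisedGraphsage()
# feed = {model.placeholders['batch_size']: 1,
#         model.placeholders['sampled_feats_0']: feats0,
#         model.placeholders['sampled_weight_0']: w0,
#         model.placeholders['sampled_column_0']: c0}  # ... plus the _1 and _2 entries
# preds = model.predict(feed)
# model.close_sess()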
# def save(self, sess=None):
# if not sess:
# raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(var_list=self.var_list)
# save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
# saver.restore(sess, save_path)
# print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(reshape=True)
# saver = tf.train.Saver(var_list=self.var_list)
# saver = tf.train.Saver()/
# this path should not be hard-coded
save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
self.saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
# ckpt_path = './data/model/%s.ckpt'%(self.aggregator_type)
# meta_path = ckpt_path + '.meta'
| identifier_body |
|
model_infer.py | # -*- coding:utf-8 -*-
from collections import namedtuple
import os
import tensorflow as tf
import config
from model.inits import glorot, zeros
import model.layers as layers
from model.aggregators import CrossAggregator
flags = tf.app.flags
FLAGS = flags.FLAGS
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
sess_config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
sess_config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
sess_config.allow_soft_placement = True
SAGEInfo = namedtuple("SAGEInfo",
['layer_name', # name of the layer (to get feature embedding etc.)
# 'neigh_sampler', # callable neigh_sampler constructor
'num_samples',
'output_dim' # the output (i.e., hidden) dimension
])
layer_infos = [SAGEInfo("node", FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", FLAGS.samples_2, FLAGS.dim_2)]
# === parameters fixed at prediction/inference time ===
num_classes = 2 # label_map.shape[1]
feats_dim = 106 #features.shape[1]
aggregator_type = 'cross'
concat = True
model_size = FLAGS.model_size
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
"""Implementation of supervised GraphSAGE."""
def __init__(self, **kwargs):
# === from model.py ===
allowed_kwargs = {'name', 'logging', 'model_size'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
# logging = kwargs.get('logging', False)
# self.logging = logging
self.vars = {}
# self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
# === set aggregator ===
# added two aggregator variants: cross, cross geniepath
if aggregator_type == 'cross':
self.aggregator_cls = CrossAggregator
else:
raise Exception("Unknown aggregator: ", aggregator_type)
self.input_dim = feats_dim
self.output_dim = num_classes # 2
# self.sampler = sampler
# self.adj_info = adj
self.layer_infos = layer_infos
self.concat = concat
self.model_size = model_size
self.sigmoid_loss = sigmoid_loss
self.dims = [(self.input_dim) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])# 102, 64, 32
self.aggregator_type = aggregator_type
# === get info from placeholders ===
# get info from placeholders...
self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
# self.labels = self.placeholders['labels']
# self.batch_nodes = placeholders["batch_nodes"]
self.batch_size = self.placeholders["batch_size"]
# self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# these keys are used by the model during init
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# gradient clipping not applied
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # take feats by index
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # look up feats by index
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # look up feats by index
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # added
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # aggregator.__init__() is invoked here # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num_samples[len(num_samples)- hop -1],
1]
# h = aggregator((hidden[hop],
# tf.reshape(hidden[hop + 1], neigh_dims)))
# call aggregator
# self_vecs, neigh_vecs, neigh_weight, neigh_column
h = aggregator((
feats_hidden[hop],
tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
next_hidden.append(h)
feats_hidden = next_hidden
#self.hiddenOutput.append(hidden[0])
return feats_hidden[0], aggregators
# def _loss(self):
# # Weight decay loss
# for aggregator in self.aggregators:
# for var in aggregator.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# for var in self.node_pred.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# # classification loss
# if self.sigmoid_loss:
# self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# logits=self.node_preds,
# labels=self.labels))
# else:
# # switched to the _v2 op
# self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=self.node_preds,
# labels=self.labels))
# # tf.summary.scalar('loss', self.loss)
def _predict(self):
if self.sigmoid_loss:
self.preds = tf.nn.sigmoid(self.node_preds)
else:
self.preds = tf.nn.softmax(self.node_preds)
# === end of the computation-graph definition ===
def predict(self, feed_dict):
preds = self.sess.run([self.preds],
feed_dict=feed_dict)
return preds
def close_sess(self):
self.sess.close()
# def save(self, sess=None):
# if not sess:
# raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(var_list=self.var_list)
# save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
# saver.restore(sess, save_path)
# print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(reshape=True)
|
# saver = tf.train.Saver()/
# this path should not be hard-coded
save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
self.saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
# ckpt_path = './data/model/%s.ckpt'%(self.aggregator_type)
# meta_path = ckpt_path + '.meta' | # saver = tf.train.Saver(var_list=self.var_list) | random_line_split |
model_infer.py | # -*- coding:utf-8 -*-
from collections import namedtuple
import os
import tensorflow as tf
import config
from model.inits import glorot, zeros
import model.layers as layers
from model.aggregators import CrossAggregator
flags = tf.app.flags
FLAGS = flags.FLAGS
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
sess_config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
sess_config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
sess_config.allow_soft_placement = True
SAGEInfo = namedtuple("SAGEInfo",
['layer_name', # name of the layer (to get feature embedding etc.)
# 'neigh_sampler', # callable neigh_sampler constructor
'num_samples',
'output_dim' # the output (i.e., hidden) dimension
])
layer_infos = [SAGEInfo("node", FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", FLAGS.samples_2, FLAGS.dim_2)]
# === parameters fixed at prediction/inference time ===
num_classes = 2 # label_map.shape[1]
feats_dim = 106 #features.shape[1]
aggregator_type = 'cross'
concat = True
model_size = FLAGS.model_size
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
"""Implementation of supervised GraphSAGE."""
def __init__(self, **kwargs):
# === from model.py ===
allowed_kwargs = {'name', 'logging', 'model_size'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
# logging = kwargs.get('logging', False)
# self.logging = logging
self.vars = {}
# self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
# === set aggregator ===
# added two aggregator variants: cross, cross geniepath
if aggregator_type == 'cross':
self.aggregator_cls = CrossAggregator
else:
raise Exception("Unknown aggregator: ", aggregator_type)
self.input_dim = feats_dim
self.output_dim = num_classes # 2
# self.sampler = sampler
# self.adj_info = adj
self.layer_infos = layer_infos
self.concat = concat
self.model_size = model_size
self.sigmoid_loss = sigmoid_loss
self.dims = [(self.input_dim) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])# 102, 64, 32
self.aggregator_type = aggregator_type
# === get info from placeholders ===
# get info from placeholders...
self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
# self.labels = self.placeholders['labels']
# self.batch_nodes = placeholders["batch_nodes"]
self.batch_size = self.placeholders["batch_size"]
# self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# these keys are used by the model during init
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# gradient clipping not applied
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support_sizes, batch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # take feats by index
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # look up feats by index
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # look up feats by index
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # added
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # aggregator.__init__() is invoked here # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num_samples[len(num_samples)- hop -1],
1]
# h = aggregator((hidden[hop],
# tf.reshape(hidden[hop + 1], neigh_dims)))
# call aggregator
# self_vecs, neigh_vecs, neigh_weight, neigh_column
h = aggregator((
feats_hidden[hop],
tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
next_hidden.append(h)
feats_hidden = next_hidden
#self.hiddenOutput.append(hidden[0])
return feats_hidden[0], aggregators
# def _loss(self):
# # Weight decay loss
# for aggregator in self.aggregators:
# for var in aggregator.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# for var in self.node_pred.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# # classification loss
# if self.sigmoid_loss:
# self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# logits=self.node_preds,
# labels=self.labels))
# else:
# # switched to the _v2 op
# self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=self.node_preds,
# labels=self.labels))
# # tf.summary.scalar('loss', self.loss)
def _predict(self):
if self.sigmoid_loss:
self.preds = tf.nn.sigmoid(self.node_preds)
else:
self.preds = tf.nn.softmax(self.node_preds)
# === end of the computation-graph definition ===
def predict(s | run([self.preds],
feed_dict=feed_dict)
return preds
def close_sess(self):
self.sess.close()
# def save(self, sess=None):
# if not sess:
# raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(var_list=self.var_list)
# save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
# saver.restore(sess, save_path)
# print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(reshape=True)
# saver = tf.train.Saver(var_list=self.var_list)
# saver = tf.train.Saver()/
# this path should not be hard-coded
save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
self.saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
# ckpt_path = './data/model/%s.ckpt'%(self.aggregator_type)
# meta_path = ckpt_path + '.meta'
| elf, feed_dict):
preds = self.sess. | conditional_block |
model_infer.py | # -*- coding:utf-8 -*-
from collections import namedtuple
import os
import tensorflow as tf
import config
from model.inits import glorot, zeros
import model.layers as layers
from model.aggregators import CrossAggregator
flags = tf.app.flags
FLAGS = flags.FLAGS
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
sess_config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
sess_config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
sess_config.allow_soft_placement = True
SAGEInfo = namedtuple("SAGEInfo",
['layer_name', # name of the layer (to get feature embedding etc.)
# 'neigh_sampler', # callable neigh_sampler constructor
'num_samples',
'output_dim' # the output (i.e., hidden) dimension
])
layer_infos = [SAGEInfo("node", FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", FLAGS.samples_2, FLAGS.dim_2)]
# === parameters fixed at prediction/inference time ===
num_classes = 2 # label_map.shape[1]
feats_dim = 106 #features.shape[1]
aggregator_type = 'cross'
concat = True
model_size = FLAGS.model_size
sigmoid_loss = FLAGS.sigmoid
identity_dim = FLAGS.identity_dim
class SupervisedGraphsage(object):
"""Implementation of supervised GraphSAGE."""
def __init__(self, **kwargs):
# === from model.py ===
allowed_kwargs = {'name', 'logging', 'model_size'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
name = self.__class__.__name__.lower()
self.name = name
# logging = kwargs.get('logging', False)
# self.logging = logging
self.vars = {}
# self.placeholders = {}
self.layers = []
self.activations = []
self.inputs = None
self.outputs = None
self.loss = 0
self.accuracy = 0
self.optimizer = None
self.opt_op = None
# === set aggregator ===
# added two aggregator variants: cross, cross geniepath
if aggregator_type == 'cross':
self.aggregator_cls = CrossAggregator
else:
raise Exception("Unknown aggregator: ", aggregator_type)
self.input_dim = feats_dim
self.output_dim = num_classes # 2
# self.sampler = sampler
# self.adj_info = adj
self.layer_infos = layer_infos
self.concat = concat
self.model_size = model_size
self.sigmoid_loss = sigmoid_loss
self.dims = [(self.input_dim) + identity_dim]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])# 102, 64, 32
self.aggregator_type = aggregator_type
# === get info from placeholders ===
# get info from placeholders...
self.placeholders = self.construct_placeholders(self.input_dim, self.output_dim)
# self.labels = self.placeholders['labels']
# self.batch_nodes = placeholders["batch_nodes"]
self.batch_size = self.placeholders["batch_size"]
# self.support_size = placeholders['support_size']
# self.features = placeholders['features']
sampled_weight = [self.placeholders['sampled_weight_0'],
self.placeholders['sampled_weight_1'],
self.placeholders['sampled_weight_2']]
sampled_column = [self.placeholders['sampled_column_0'],
self.placeholders['sampled_column_1'],
self.placeholders['sampled_column_2']]
sampled_feats = [self.placeholders['sampled_feats_0'],
self.placeholders['sampled_feats_1'],
self.placeholders['sampled_feats_2']]
self.data_sampled = [sampled_feats, sampled_weight, sampled_column]
self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
self.build()
self.var_list = tf.trainable_variables()
self.saver = tf.train.Saver(var_list=self.var_list)
self.sess = tf.Session(config=sess_config)
self.sess.run(tf.global_variables_initializer())
self.load(self.sess)
def construct_placeholders(self, num_classes, feats_dim):
# Define placeholders
# these keys are used by the model during init
# feed_dict = {placeholders: data}
placeholders = {
# 'features': tf.placeholder(tf.float32, shape=(None, feats_dim)),
# 'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
# 'batch_nodes': tf.placeholder(tf.int32, shape=(None), name='batch_nodes'),
'batch_size': tf.placeholder(tf.int32, name='batch_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'sampled_weight_0': tf.placeholder(tf.float32, name='sampled_weight_0'),
'sampled_column_0': tf.placeholder(tf.int32, name='sampled_column_0'),
'sampled_feats_0': tf.placeholder(tf.float32, name='sampled_feats_0'),
'sampled_weight_1': tf.placeholder(tf.float32, name='sampled_weight_1'),
'sampled_column_1': tf.placeholder(tf.int32, name='sampled_column_1'),
'sampled_feats_1': tf.placeholder(tf.float32, name='sampled_feats_1'),
'sampled_weight_2': tf.placeholder(tf.float32, name='sampled_weight_2'),
'sampled_column_2': tf.placeholder(tf.int32, name='sampled_column_2'),
'sampled_feats_2': tf.placeholder(tf.float32, name='sampled_feats_2')
}
return placeholders
# === build computation graph ===
def build(self):
# data_sampled, support_sizes = self.sample(self.batch_nodes, self.layer_infos)
support_size = 1 # [1, 8, 8*16]
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = len(self.layer_infos) - k -1
support_size *= self.layer_infos[t].num_samples
support_sizes.append(support_size)
sample_size = [layer_info.num_samples for layer_info in self.layer_infos] # 16, 8
self.outputs, self.aggregators = self.aggregate(
self.data_sampled, self.dims, sample_size,
support_sizes, concat=self.concat, model_size=self.model_size)
# data_sampled, [self.features], self.dims, num_samples,
# support_sizes, concat=self.concat, model_size=self.model_size)
self.outputs = tf.nn.l2_normalize(self.outputs, 1)
dim_mult = 2 if self.concat else 1
self.node_pred = layers.Dense(dim_mult*self.dims[-1], self.output_dim,
dropout=self.placeholders['dropout'],
act=lambda x : x) # no non-linear activation
# TF graph management
self.node_preds = self.node_pred(self.outputs)
# self._loss()
# gradient clipping not applied
# grads_and_vars = self.optimizer.compute_gradients(self.loss)
# clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
# for grad, var in grads_and_vars]
# self.grad, _ = clipped_grads_and_vars[0]
# self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)
# self.opt_op = self.optimizer.minimize(self.loss)
self._predict()
def aggregate(self, data_sampled, dims, num_samples, support | atch_size=None,
aggregators=None, name='aggregate', concat=False, model_size="small"):
if batch_size is None:
batch_size = self.batch_size
# length: number of layers + 1
# hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
feats_hidden = data_sampled[0] # take feats by index
weight_hidden = data_sampled[1]
column_hidden = data_sampled[2]
# feats_hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples[0]] # look up feats by index
# feats_hidden = [feat_samples for feat_samples in data_sampled[0]] # look up feats by index
# weight_hidden = [weight_samples for weight_samples in data_sampled[1]]
# column_hidden = [column_samples for column_samples in data_sampled[2]]
new_agg = aggregators is None
if new_agg:
aggregators = []
# c_list = [] # added
for layer in range(len(num_samples)):
if new_agg:
dim_mult = 2 if concat and (layer != 0) else 1
# aggregator at current layer
if layer == len(num_samples) - 1: # 2*64, 32
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1], act=lambda x : x, # no non-linear activation
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
else: # aggregator.__init__() is invoked here # 106 -> 64
aggregator = self.aggregator_cls(
dim_mult*dims[layer], dims[layer+1],
dropout=self.placeholders['dropout'],
name=name, concat=concat, model_size=model_size)
aggregators.append(aggregator)
else:
aggregator = aggregators[layer]
# hidden representation at current layer for all support nodes that are various hops away
next_hidden = []
# as layer increases, the number of support nodes needed decreases
for hop in range(len(num_samples) - layer):
dim_mult = 2 if concat and (layer != 0) else 1
neigh_dims = [batch_size * support_sizes[hop], # 1, 8; 1
num_samples[len(num_samples) - hop - 1], # 8, 16; 8
dim_mult*dims[layer]] # 106, 106; 2 * 64
weight_neigh_dims = [batch_size * support_sizes[hop],
num_samples[len(num_samples)- hop -1],
1]
# h = aggregator((hidden[hop],
# tf.reshape(hidden[hop + 1], neigh_dims)))
# call aggregator
# self_vecs, neigh_vecs, neigh_weight, neigh_column
h = aggregator((
feats_hidden[hop],
tf.reshape(feats_hidden[hop + 1], neigh_dims), # [1,8,106], [8, 16, 106], [1, 8, 2*64]
tf.reshape(weight_hidden[hop + 1], weight_neigh_dims),
tf.reshape(column_hidden[hop + 1], weight_neigh_dims)))
next_hidden.append(h)
feats_hidden = next_hidden
#self.hiddenOutput.append(hidden[0])
return feats_hidden[0], aggregators
# def _loss(self):
# # Weight decay loss
# for aggregator in self.aggregators:
# for var in aggregator.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# for var in self.node_pred.vars.values():
# self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
# # classification loss
# if self.sigmoid_loss:
# self.loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
# logits=self.node_preds,
# labels=self.labels))
# else:
# # switched to the _v2 op
# self.loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=self.node_preds,
# labels=self.labels))
# # tf.summary.scalar('loss', self.loss)
def _predict(self):
if self.sigmoid_loss:
self.preds = tf.nn.sigmoid(self.node_preds)
else:
self.preds = tf.nn.softmax(self.node_preds)
# === end of the computation-graph definition ===
def predict(self, feed_dict):
preds = self.sess.run([self.preds],
feed_dict=feed_dict)
return preds
def close_sess(self):
self.sess.close()
# def save(self, sess=None):
# if not sess:
# raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(var_list=self.var_list)
# save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
# saver.restore(sess, save_path)
# print("Model saved in file: %s" % save_path)
def load(self, sess=None):
if not sess:
raise AttributeError("TensorFlow session not provided.")
# saver = tf.train.Saver(reshape=True)
# saver = tf.train.Saver(var_list=self.var_list)
# saver = tf.train.Saver()/
# this path should not be hard-coded
save_path = "./data/model/%s.ckpt" %(self.aggregator_type)
self.saver.restore(sess, save_path)
print("Model restored from file: %s" % save_path)
# ckpt_path = './data/model/%s.ckpt'%(self.aggregator_type)
# meta_path = ckpt_path + '.meta'
| _sizes, b | identifier_name |
plot2DlimitsAll.py | import os
import sys
import math
import ROOT
#from ROOT import TColor
from array import array
from optparse import OptionParser
from CMS_lumi import CMS_lumi
import plotting_interp as plot
import re
import json
import types
import numpy
def run(opts):
# --- read in options
model = opts.model
which = opts.which
outdir = opts.outdir
do90 = opts.do90
dowgt = opts.dowgt
dosmth = opts.dosmth
smthfnc = opts.smthfnc
#if dosmth: addtxt = '_smth'
# --- read in files
indir = '/eos/cms/store/group/phys_exotica/MonoHgg/MonoH-COMBO-2016/'+model+'_jsons/'
if dowgt: wfile = ''
else: wfile = '_weighted'
if do90: indir += which+'_'+model+wfile+'_results_90CL/'
else: indir += which+'_'+model+wfile+'_results/'
# --- options for plot averaging
doFillAvgLow = True # do averaging below mMed = 2*mDM line
doFillAvgHigh = True # do averaging above mMed = 2*mDM line
if model=="2HDM": doFillAvgLow = False
if model=="2HDM": doFillAvgHigh = False
doFillAvgRest = True # do averaging at line or average normally
doFillAvgAll = True # do averaging for all plots not just observed
# --- setup general style
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gStyle.SetOptStat(0)
plot.ModTDRStyle()
canv = ROOT.TCanvas()
canv.SetLogz()
canv.SetTicks()
canv.SetRightMargin(0.16) # allow enough space for z axis label
canv.cd()
# --- setup palette
ROOT.gStyle.SetPalette(57) # palette normal
InvertPalette() # palette inverted
ROOT.gStyle.SetNumberContours(255)
A=[]; Z=[]
# --- mass points
if model=="2HDM":
A=[300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675]
Z=[450,500,550,600,650,700,750,800,850,900,950,1000,1050,1100,1150,1200,1250,1300,1350,1400,1450,1500,1550,1600,1650,1700,1750,1800,1850,1900,1950]
if model=="BARY":
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,875,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,675,750,800,850,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
if model=="BARY" and which=='combo':
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,650,700,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
# --- binning for BARY model
# Y axis
BinningA = [0.5,1.5]
BinAAxis = [1.0,47.5]
for i in range(1, len(A)-1):
BinningA.append( (A[i] + A[i+1])/2.0 )
BinAAxis.append( (A[i] + A[i+1])/2.0 )
BinningA.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
BinAAxis.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
# X axis
BinningZ = [9,11]
BinZAxis = [10,75]
for i in range(1, len(Z)-1):
BinningZ.append( (Z[i] + Z[i+1])/2.0 )
BinZAxis.append( (Z[i] + Z[i+1])/2.0 )
BinningZ.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
BinZAxis.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
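# The variable bin edges sit at midpoints between adjacent mass points, so every
# simulated (mZ', mDM) grid point lands in its own bin of the TH2.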
# --- setup histograms (different models have different binning)
if model=="2HDM":
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z), Z[0], Z[-1]+50, len(A), A[0], A[-1]+25)
limitPlot = ROOT.TH2F("lplot", "lplot", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
if model=="BARY": # variable binning
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z)-1, array('d',BinZAxis), len(A)-1, array('d',BinAAxis))
limitPlot = ROOT.TH2F("lplot", "lplot", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
# --- read in json files
for a in A:
for z in Z:
data = {}
filename = indir+'Zprime'+str(z)+'A'+str(a)+'.json'
if which=='gg' and model=='BARY': # BARY gg ONLY has DM instead of A in filename
filename = indir+'Zprime'+str(z)+'DM'+str(a)+'.json'
scale = 1.
if dowgt: scale = scaleXS(model,z,a)
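# scaleXS returns the 1/xsec weight as a string, or "99999" when the point is missing
# from the cross-section table; such points are skipped by the check below.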
if os.path.isfile(filename) and scale != "99999":
with open(filename) as jsonfile:
data = json.load(jsonfile)
for key in data: # fill plots from json
limitPlot.SetBinContent(limitPlot.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp0'])
limitPlotUp.SetBinContent(limitPlotUp.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+1'])
limitPlotDown.SetBinContent(limitPlotDown.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-1'])
limitPlotUp2.SetBinContent(limitPlotUp2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+2'])
limitPlotDown2.SetBinContent(limitPlotDown2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-2'])
limitPlotObs.SetBinContent(limitPlotObs.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'obs'])
# --- average plots to make smooth contours
fillAvg(limitPlotObs, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotObs, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotObs, A, Z, False, False, doFillAvgRest)
if doFillAvgAll:
fillAvg(limitPlot, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlot, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlot, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp2, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown2, A, Z, False, False, doFillAvgRest)
# --- axis labels
limitPlotAxis.GetXaxis().SetTitle("m_{Z'} [GeV]")
limitPlotObs.GetZaxis().SetTitle("#sigma_{95% CL}/#sigma_{th}")
if model=="2HDM": limitPlotAxis.GetYaxis().SetTitle("m_{A} [GeV]")
if model=="BARY": limitPlotAxis.GetYaxis().SetTitle("m_{DM} [GeV]")
# --- clone obs to get contour
limitPlotObsCopy = limitPlotObs.Clone()
# --- set up min and max of z axis
limitPlotObs.SetMaximum(100)
limitPlotObs.SetMinimum(0.3)
# --- set range of x and y axis
if model=="BARY": limitPlotObs.GetXaxis().SetRangeUser(10,2001)
if model=="BARY": limitPlotObs.GetYaxis().SetRangeUser(1,1001)
if model=="2HDM": limitPlotObs.GetXaxis().SetRangeUser(450,2000)
if model=="2HDM": limitPlotObs.GetYaxis().SetRangeUser(300,700)
# --- style plot
limitPlotObs.GetYaxis().SetTitleOffset(0.95) # format axis labels
limitPlotObs.GetZaxis().SetTitleOffset(0.95)
limitPlotObs.GetXaxis().SetLabelSize(0.035) # format axis ticks
limitPlotObs.GetYaxis().SetLabelSize(0.035)
if model=="2HDM": limitPlotAxis.GetXaxis().SetNdivisions(9)
if model=="2HDM": limitPlotAxis.GetYaxis().SetNdivisions(8)
if model=="BARY": limitPlotAxis.GetXaxis().SetNdivisions(10)
if model=="BARY": limitPlotAxis.GetYaxis().SetNdivisions(16)
# --- smooth
if dosmth:
limitPlot.GetXaxis().SetRange(3,limitPlot.GetNbinsX())
limitPlot.Smooth(1,smthfnc)
limitPlotObsCopy.GetXaxis().SetRange(3,limitPlotObsCopy.GetNbinsX())
limitPlotObsCopy.Smooth(1,smthfnc)
limitPlotUp.GetXaxis().SetRange(3,limitPlotUp.GetNbinsX())
limitPlotUp.Smooth(1,smthfnc)
limitPlotDown.GetXaxis().SetRange(3,limitPlotDown.GetNbinsX())
limitPlotDown.Smooth(1,smthfnc)
limitPlot.GetXaxis().SetRange(0,limitPlot.GetNbinsX())
limitPlotObsCopy.GetXaxis().SetRange(0,limitPlotObsCopy.GetNbinsX())
limitPlotUp.GetXaxis().SetRange(0,limitPlotUp.GetNbinsX())
limitPlotDown.GetXaxis().SetRange(0,limitPlotDown.GetNbinsX())
#limitPlot.Smooth(1,smthfnc)
#limitPlotObsCopy.Smooth(1,smthfnc)
#limitPlotUp.Smooth(1,smthfnc)
#limitPlotDown.Smooth(1,smthfnc)
# --- get and style each contour
# 1 sigma up
limitPlotUp.SetMinimum(1)
limitPlotUp.SetContour(1)
limitPlotUp.SetLineWidth(1)
# 1 sigma down
limitPlotDown.SetMinimum(1)
limitPlotDown.SetContour(1)
limitPlotDown.SetLineWidth(1)
# observed
limitPlotObs.SetLineWidth(3)
limitPlotObs.SetLineColor(2)
limitPlotObsCopy.SetMinimum(1)
limitPlotObsCopy.SetContour(1)
limitPlotObsCopy.SetLineWidth(3)
limitPlotObsCopy.SetLineColor(2)
# expected
limitPlot.SetMinimum(1)
limitPlot.SetContour(1)
limitPlot.SetLineStyle(7)
limitPlot.SetLineWidth(3)
# --- draw plots
limitPlotAxis.Draw("COLZ")
limitPlotObs.Draw("COLZ SAME")
limitPlotUp.Draw("CONT3 SAME")
limitPlotDown.Draw("CONT3 SAME")
limitPlotObsCopy.Draw("CONT3 SAME")
limitPlot.Draw("CONT3 SAME")
# --- legend and extra text box
x1 = 0.18
y1 = 0.55
x2 = x1+0.42
y2 = y1+0.35
# --- add white box below bary
white = ROOT.TPaveText(x1,y1,x2,y2,"NDC")
white.AddText("")
white.SetFillColor(0)
white.Draw("SAME")
# --- latex
if model=="2HDM": txt1 = "#bf{Z'-2HDM}"
if model=="BARY": txt1 = "#bf{Baryonic Z'}"
txt1 += "#bf{, Z' #rightarrow DM + h"
if which=='gg': txt1 += "(#gamma#gamma)}"
if which=='tt': txt1 += "(#tau#tau)} "
if which=='combo': txt1 += "(#gamma#gamma + #tau#tau)}"
if model=="2HDM": txt2 = "#bf{Dirac DM, m_{DM} = 100 GeV}"
if model=="BARY": txt2 = "#bf{Dirac DM, g_{q} = 0.25, g_{DM} = 1.0 }"
if model=="2HDM": txt3 = "#bf{g_{Z'} = 0.8, g_{DM} = 1.0}"
if model=="BARY": txt3 = ""
txt = ROOT.TPaveText(x1,y1+0.15,x2,y2,"NDC")
txt.AddText(txt1)
txt.AddText(txt2)
txt.AddText(txt3)
txt.SetTextAlign(12)
txt.SetTextSize(0.04)
txt.Draw("SAME")
# --- legend
if model=="2HDM": leg = ROOT.TLegend(x1,y1,x2,y1+0.15)
if model=="BARY": leg = ROOT.TLegend(x1,y1+0.05,x2,y1+0.2)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.040)
leg.AddEntry(limitPlotObs,"Observed 95% CL","L")
leg.AddEntry(limitPlot,"Expected 95% CL","L")
leg.AddEntry(limitPlotUp,"#pm 1 s.d.","L")
leg.Draw()
canv.cd()
canv.Update()
CMS_lumi(canv,4,0)
canv.RedrawAxis()
canv.Update()
# --- save
outname = outdir+'contours_'
if do90: outname += '90CL'
else: outname += '95CL'
outname += '_'+model+'_'+which+'.root'
outfile = ROOT.TFile(outname,'RECREATE')
outfile.cd()
limitPlot.Write()
limitPlotObs.Write()
if do90:
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.png')
else:
canv.Print(outdir+'limits2D_'+model+'_'+which+'.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'.png')
def fillAvg(limitPlot,A,Z,doL,doH,doR):
# --- ordering for each option
irange = range(1,limitPlot.GetNbinsY()+1)
jrange = range(1,limitPlot.GetNbinsX()+1)
if doL: jrange = list(reversed(range(1,limitPlot.GetNbinsX()+1)))
if doH: irange = list(reversed(range(1,limitPlot.GetNbinsY()+1)))
# --- average over 4 adjacent bins
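# Only bins that are still empty (content "0.0") get filled, using the mean of their
# non-empty left/right/up/down neighbours; the doL/doH flags restrict both which bins
# are eligible (below/above the mMed = 2*mDM line) and which neighbour values are accepted.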
for i in irange:
for j in jrange:
aVal = A[i-1]
zVal = Z[j-1]
binVal = str(limitPlot.GetBinContent(j,i))
# --- only if bin is 0 do averaging
if binVal == "0.0" and ((doL and 2*float(aVal) < float(zVal)) or (doH and 2*float(aVal) > float(zVal)) or (doR)):
avg = 0.0
div = 0.0
back = limitPlot.GetBinContent(j-1,i)
forw = limitPlot.GetBinContent(j+1,i)
down = limitPlot.GetBinContent(j,i-1)
abov = limitPlot.GetBinContent(j,i+1)
if back != 0.0 and ((doL and back < 50.) or (doH and back > 50.) or (doR)):
avg += back
div += 1
if forw != 0.0 and ((doL and forw < 50.) or (doH and forw > 50.) or (doR)):
avg += forw
div += 1
if down != 0.0 and ((doL and down < 50.) or (doH and down > 50.) or (doR)):
avg += down
div += 1
if abov != 0.0 and ((doL and abov < 50.) or (doH and abov > 50.) or (doR)):
avg += abov
div += 1
if div != 0:
avg = avg/div
limitPlot.SetBinContent(j,i,avg)
def InvertPalette():
# --- Function to make inverted kBird palette
alpha=1
stops = array('d', ([ 0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) )
red = array('d', ([ 0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) )
green = array('d', ([ 0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) )
blue = array('d', ([ 0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) )
invred = numpy.asarray(list(reversed(red)))
invgreen = numpy.asarray(list(reversed(green)))
invblue = numpy.asarray(list(reversed(blue)))
ROOT.TColor.CreateGradientColorTable(9, stops, invred, invgreen, invblue, 255, alpha);
def scaleXS(model,Z,A):
# --- Function to scale point by 1/xsec
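# Each line of the reference file is assumed to be "<mZprime> <mA_or_mDM> <xsec>"
# separated by single spaces; the returned weight is 1/xsec.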
if model=="2HDM": xsRef = open("crosssectionZp2HDM.txt")
if model=="BARY": xsRef = open("crosssectionZpBaryonic.txt")
returnString = "99999"
for line in xsRef:
if (str(line.split(' ')[0]) == str(Z) and str(line.split(' ')[1]) == str(A)):
returnString = str(1./float(line.split(' ')[2]))
print returnString
return returnString
def init():
# options
parser = OptionParser("usage: %prog [options]")
parser.add_option("-O",action="store",dest="outdir",type="string",
default="",help="Output directory [default = %default]"),
parser.add_option("-m",action="store",dest="model",type="string",
default="",help="Which model (2HDM or BARY)"),
parser.add_option("-w",action="store",dest="which",type="string",
default="",help="Which channel (gg, tt, combo)"),
parser.add_option("--dowgt",action="store_true",dest="dowgt",
default=False,help="Weight by 1/xsec (if not already done) [default = %default]"),
parser.add_option("--do90",action="store_true",dest="do90",
default=False,help="Store 90%CL root file [default = %default]"),
parser.add_option("--dosmooth",action="store_true",dest="dosmth",
default=False,help="Smooth TH2 after filling and avg. [default = %default]"),
parser.add_option("--smth",action="store",dest="smthfnc",type="string",
default="k5a",help="Smoothing function to apply [default = %default]"),
(options, args) = parser.parse_args()
if options.model!="2HDM" and options.model!="BARY":
print "Model "+options.model+" is NOT a valid option."
sys.exit()
if options.which!="gg" and options.which!="tt" and options.which!="combo":
print "Channel "+options.which+" is NOT a valid option."
sys.exit()
| #run
print "Making 2D limit plot for: "+options.model+" "+options.which
if options.do90: print "Using 90CL limits"
if options.dowgt: print "Weighting by 1/xsec"
if options.dosmth: print "Smoothing applied"
run(options)
if __name__=="__main__":
init() | random_line_split |
|
plot2DlimitsAll.py | import os
import sys
import math
import ROOT
#from ROOT import TColor
from array import array
from optparse import OptionParser
from CMS_lumi import CMS_lumi
import plotting_interp as plot
import re
import json
import types
import numpy
def run(opts):
# --- read in options
model = opts.model
which = opts.which
outdir = opts.outdir
do90 = opts.do90
dowgt = opts.dowgt
dosmth = opts.dosmth
smthfnc = opts.smthfnc
#if dosmth: addtxt = '_smth'
# --- read in files
indir = '/eos/cms/store/group/phys_exotica/MonoHgg/MonoH-COMBO-2016/'+model+'_jsons/'
if dowgt: wfile = ''
else: wfile = '_weighted'
if do90: indir += which+'_'+model+wfile+'_results_90CL/'
else: indir += which+'_'+model+wfile+'_results/'
# --- options for plot averaging
doFillAvgLow = True # do averaging below mMed = 2*mDM line
doFillAvgHigh = True # do averaging above mMed = 2*mDM line
if model=="2HDM": doFillAvgLow = False
if model=="2HDM": doFillAvgHigh = False
doFillAvgRest = True # do averaging at line or average normally
doFillAvgAll = True # do averaging for all plots not just observed
# --- setup general style
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gStyle.SetOptStat(0)
plot.ModTDRStyle()
canv = ROOT.TCanvas()
canv.SetLogz()
canv.SetTicks()
canv.SetRightMargin(0.16) # allow enough space for z axis label
canv.cd()
# --- setup palette
ROOT.gStyle.SetPalette(57) # palette normal
InvertPalette() # palette inverted
ROOT.gStyle.SetNumberContours(255)
A=[]; Z=[]
# --- mass points
if model=="2HDM":
A=[300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675]
Z=[450,500,550,600,650,700,750,800,850,900,950,1000,1050,1100,1150,1200,1250,1300,1350,1400,1450,1500,1550,1600,1650,1700,1750,1800,1850,1900,1950]
if model=="BARY":
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,875,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,675,750,800,850,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
if model=="BARY" and which=='combo':
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,650,700,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
# --- binning for BARY model
# Y axis
BinningA = [0.5,1.5]
BinAAxis = [1.0,47.5]
for i in range(1, len(A)-1):
BinningA.append( (A[i] + A[i+1])/2.0 )
BinAAxis.append( (A[i] + A[i+1])/2.0 )
BinningA.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
BinAAxis.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
# X axis
BinningZ = [9,11]
BinZAxis = [10,75]
for i in range(1, len(Z)-1):
BinningZ.append( (Z[i] + Z[i+1])/2.0 )
BinZAxis.append( (Z[i] + Z[i+1])/2.0 )
BinningZ.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
BinZAxis.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
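    # --- Editor's illustration (hedged, not part of the original): the loop above places each
    # --- internal bin edge at the midpoint of neighbouring mass points and mirrors the spacing
    # --- for the last edge. For a toy list masses = [10, 50, 100, 200] with starting edges [9, 11]
    # --- (the hard-coded convention used for BinningZ), the appended edges are
    # --- (50+100)/2 = 75.0, (100+200)/2 = 150.0 and 200 + 200 - (100+200)/2 = 250.0,
    # --- i.e. [9, 11, 75.0, 150.0, 250.0]: five edges for four bins, matching the
    # --- len(Binning)-1 bin counts passed to the variable-binning TH2F constructors below.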
# --- setup histograms (different models have different binning)
if model=="2HDM":
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z), Z[0], Z[-1]+50, len(A), A[0], A[-1]+25)
limitPlot = ROOT.TH2F("lplot", "lplot", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
if model=="BARY": # variable binning
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z)-1, array('d',BinZAxis), len(A)-1, array('d',BinAAxis))
limitPlot = ROOT.TH2F("lplot", "lplot", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
# --- read in json files
for a in A:
for z in Z:
data = {}
filename = indir+'Zprime'+str(z)+'A'+str(a)+'.json'
if which=='gg' and model=='BARY': # BARY gg ONLY has DM instead of A in filename
filename = indir+'Zprime'+str(z)+'DM'+str(a)+'.json'
scale = 1.
if dowgt: scale = scaleXS(model,z,a)
if os.path.isfile(filename) and scale != "99999":
with open(filename) as jsonfile:
data = json.load(jsonfile)
for key in data: # fill plots from json
limitPlot.SetBinContent(limitPlot.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp0'])
limitPlotUp.SetBinContent(limitPlotUp.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+1'])
limitPlotDown.SetBinContent(limitPlotDown.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-1'])
limitPlotUp2.SetBinContent(limitPlotUp2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+2'])
limitPlotDown2.SetBinContent(limitPlotDown2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-2'])
limitPlotObs.SetBinContent(limitPlotObs.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'obs'])
# --- average plots to make smooth contours
fillAvg(limitPlotObs, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotObs, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotObs, A, Z, False, False, doFillAvgRest)
if doFillAvgAll:
fillAvg(limitPlot, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlot, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlot, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp2, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown2, A, Z, False, False, doFillAvgRest)
# --- axis labels
limitPlotAxis.GetXaxis().SetTitle("m_{Z'} [GeV]")
limitPlotObs.GetZaxis().SetTitle("#sigma_{95% CL}/#sigma_{th}")
if model=="2HDM": limitPlotAxis.GetYaxis().SetTitle("m_{A} [GeV]")
if model=="BARY": limitPlotAxis.GetYaxis().SetTitle("m_{DM} [GeV]")
# --- clone obs to get contour
limitPlotObsCopy = limitPlotObs.Clone()
# --- set up min and max of z axis
limitPlotObs.SetMaximum(100)
limitPlotObs.SetMinimum(0.3)
# --- set range of x and y axis
if model=="BARY": limitPlotObs.GetXaxis().SetRangeUser(10,2001)
if model=="BARY": limitPlotObs.GetYaxis().SetRangeUser(1,1001)
if model=="2HDM": limitPlotObs.GetXaxis().SetRangeUser(450,2000)
if model=="2HDM": limitPlotObs.GetYaxis().SetRangeUser(300,700)
# --- style plot
limitPlotObs.GetYaxis().SetTitleOffset(0.95) # format axis labels
limitPlotObs.GetZaxis().SetTitleOffset(0.95)
limitPlotObs.GetXaxis().SetLabelSize(0.035) # format axis ticks
limitPlotObs.GetYaxis().SetLabelSize(0.035)
if model=="2HDM": limitPlotAxis.GetXaxis().SetNdivisions(9)
if model=="2HDM": limitPlotAxis.GetYaxis().SetNdivisions(8)
if model=="BARY": limitPlotAxis.GetXaxis().SetNdivisions(10)
if model=="BARY": limitPlotAxis.GetYaxis().SetNdivisions(16)
# --- smooth
if dosmth:
limitPlot.GetXaxis().SetRange(3,limitPlot.GetNbinsX())
limitPlot.Smooth(1,smthfnc)
limitPlotObsCopy.GetXaxis().SetRange(3,limitPlotObsCopy.GetNbinsX())
limitPlotObsCopy.Smooth(1,smthfnc)
limitPlotUp.GetXaxis().SetRange(3,limitPlotUp.GetNbinsX())
limitPlotUp.Smooth(1,smthfnc)
limitPlotDown.GetXaxis().SetRange(3,limitPlotDown.GetNbinsX())
limitPlotDown.Smooth(1,smthfnc)
limitPlot.GetXaxis().SetRange(0,limitPlot.GetNbinsX())
limitPlotObsCopy.GetXaxis().SetRange(0,limitPlotObsCopy.GetNbinsX())
limitPlotUp.GetXaxis().SetRange(0,limitPlotUp.GetNbinsX())
limitPlotDown.GetXaxis().SetRange(0,limitPlotDown.GetNbinsX())
#limitPlot.Smooth(1,smthfnc)
#limitPlotObsCopy.Smooth(1,smthfnc)
#limitPlotUp.Smooth(1,smthfnc)
#limitPlotDown.Smooth(1,smthfnc)
# --- get and style each contour
# 1 sigma up
limitPlotUp.SetMinimum(1)
limitPlotUp.SetContour(1)
limitPlotUp.SetLineWidth(1)
# 1 sigma down
limitPlotDown.SetMinimum(1)
limitPlotDown.SetContour(1)
limitPlotDown.SetLineWidth(1)
# observed
limitPlotObs.SetLineWidth(3)
limitPlotObs.SetLineColor(2)
limitPlotObsCopy.SetMinimum(1)
limitPlotObsCopy.SetContour(1)
limitPlotObsCopy.SetLineWidth(3)
limitPlotObsCopy.SetLineColor(2)
# expected
limitPlot.SetMinimum(1)
limitPlot.SetContour(1)
limitPlot.SetLineStyle(7)
limitPlot.SetLineWidth(3)
# --- draw plots
limitPlotAxis.Draw("COLZ")
limitPlotObs.Draw("COLZ SAME")
limitPlotUp.Draw("CONT3 SAME")
limitPlotDown.Draw("CONT3 SAME")
limitPlotObsCopy.Draw("CONT3 SAME")
limitPlot.Draw("CONT3 SAME")
# --- legend and extra text box
x1 = 0.18
y1 = 0.55
x2 = x1+0.42
y2 = y1+0.35
# --- add white box below bary
white = ROOT.TPaveText(x1,y1,x2,y2,"NDC")
white.AddText("")
white.SetFillColor(0)
white.Draw("SAME")
# --- latex
if model=="2HDM": txt1 = "#bf{Z'-2HDM}"
if model=="BARY": txt1 = "#bf{Baryonic Z'}"
txt1 += "#bf{, Z' #rightarrow DM + h"
if which=='gg': txt1 += "(#gamma#gamma)}"
if which=='tt': txt1 += "(#tau#tau)} "
if which=='combo': txt1 += "(#gamma#gamma + #tau#tau)}"
if model=="2HDM": txt2 = "#bf{Dirac DM, m_{DM} = 100 GeV}"
if model=="BARY": txt2 = "#bf{Dirac DM, g_{q} = 0.25, g_{DM} = 1.0 }"
if model=="2HDM": txt3 = "#bf{g_{Z'} = 0.8, g_{DM} = 1.0}"
if model=="BARY": txt3 = ""
txt = ROOT.TPaveText(x1,y1+0.15,x2,y2,"NDC")
txt.AddText(txt1)
txt.AddText(txt2)
txt.AddText(txt3)
txt.SetTextAlign(12)
txt.SetTextSize(0.04)
txt.Draw("SAME")
# --- legend
if model=="2HDM": leg = ROOT.TLegend(x1,y1,x2,y1+0.15)
if model=="BARY": leg = ROOT.TLegend(x1,y1+0.05,x2,y1+0.2)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.040)
leg.AddEntry(limitPlotObs,"Observed 95% CL","L")
leg.AddEntry(limitPlot,"Expected 95% CL","L")
leg.AddEntry(limitPlotUp,"#pm 1 s.d.","L")
leg.Draw()
canv.cd()
canv.Update()
CMS_lumi(canv,4,0)
canv.RedrawAxis()
canv.Update()
# --- save
outname = outdir+'contours_'
if do90: outname += '90CL'
else: outname += '95CL'
outname += '_'+model+'_'+which+'.root'
outfile = ROOT.TFile(outname,'RECREATE')
outfile.cd()
limitPlot.Write()
limitPlotObs.Write()
if do90:
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.png')
else:
canv.Print(outdir+'limits2D_'+model+'_'+which+'.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'.png')
def fillAvg(limitPlot,A,Z,doL,doH,doR):
# --- ordering for each option
irange = range(1,limitPlot.GetNbinsY()+1)
jrange = range(1,limitPlot.GetNbinsX()+1)
if doL: jrange = list(reversed(range(1,limitPlot.GetNbinsX()+1)))
if doH: irange = list(reversed(range(1,limitPlot.GetNbinsY()+1)))
# --- average over 4 adjacent bins
for i in irange:
for j in jrange:
aVal = A[i-1]
zVal = Z[j-1]
binVal = str(limitPlot.GetBinContent(j,i))
# --- only if bin is 0 do averaging
if binVal == "0.0" and ((doL and 2*float(aVal) < float(zVal)) or (doH and 2*float(aVal) > float(zVal)) or (doR)):
avg = 0.0
div = 0.0
back = limitPlot.GetBinContent(j-1,i)
forw = limitPlot.GetBinContent(j+1,i)
down = limitPlot.GetBinContent(j,i-1)
abov = limitPlot.GetBinContent(j,i+1)
if back != 0.0 and ((doL and back < 50.) or (doH and back > 50.) or (doR)):
avg += back
div += 1
if forw != 0.0 and ((doL and forw < 50.) or (doH and forw > 50.) or (doR)):
avg += forw
div += 1
if down != 0.0 and ((doL and down < 50.) or (doH and down > 50.) or (doR)):
avg += down
div += 1
if abov != 0.0 and ((doL and abov < 50.) or (doH and abov > 50.) or (doR)):
avg += abov
div += 1
if div != 0:
avg = avg/div
limitPlot.SetBinContent(j,i,avg)
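# --- Editor's illustration (hedged, not part of the original workflow): stripped of the ROOT
# --- bin bookkeeping and of the mMed = 2*mDM side conditions, the averaging rule in fillAvg()
# --- reduces to filling each zero cell with the mean of its non-zero 4-neighbours:
def _avg_empty_cells(grid):
    # grid: list of equal-length rows of floats; zeros are treated as missing and filled in place
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] == 0.0:
                neigh = []
                if j > 0: neigh.append(grid[i][j - 1])
                if j + 1 < len(grid[i]): neigh.append(grid[i][j + 1])
                if i > 0: neigh.append(grid[i - 1][j])
                if i + 1 < len(grid): neigh.append(grid[i + 1][j])
                neigh = [v for v in neigh if v != 0.0]
                if neigh:
                    grid[i][j] = sum(neigh) / float(len(neigh))
    return grid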
def InvertPalette():
# --- Function to make inverted kBird palette
alpha=1
stops = array('d', ([ 0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) )
red = array('d', ([ 0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) )
green = array('d', ([ 0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) )
blue = array('d', ([ 0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) )
invred = numpy.asarray(list(reversed(red)))
invgreen = numpy.asarray(list(reversed(green)))
invblue = numpy.asarray(list(reversed(blue)))
ROOT.TColor.CreateGradientColorTable(9, stops, invred, invgreen, invblue, 255, alpha);
def scaleXS(model,Z,A):
# --- Function to scale point by 1/xsec
if model=="2HDM": xsRef = open("crosssectionZp2HDM.txt")
if model=="BARY": xsRef = open("crosssectionZpBaryonic.txt")
returnString = "99999"
for line in xsRef:
if (str(line.split(' ')[0]) == str(Z) and str(line.split(' ')[1]) == str(A)):
returnString = str(1./float(line.split(' ')[2]))
print returnString
return returnString
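# --- Editor's note (hedged): scaleXS() above assumes a plain space-separated text file with one
# --- "mZ' mA(or mDM) xsec" triple per line; a made-up line such as "1000 300 0.01234" would make
# --- scaleXS("2HDM", 1000, 300) return str(1./0.01234). Points missing from the file fall through
# --- to the sentinel "99999", which the caller uses to skip that mass point.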
def init():
# options
|
if __name__=="__main__":
init()
| parser = OptionParser("usage: %prog [options]")
parser.add_option("-O",action="store",dest="outdir",type="string",
default="",help="Output directory [default = %default]"),
parser.add_option("-m",action="store",dest="model",type="string",
default="",help="Which model (2HDM or BARY)"),
parser.add_option("-w",action="store",dest="which",type="string",
default="",help="Which channel (gg, tt, combo)"),
parser.add_option("--dowgt",action="store_true",dest="dowgt",
default=False,help="Weight by 1/xsec (if not already done) [default = %default]"),
parser.add_option("--do90",action="store_true",dest="do90",
default=False,help="Store 90%CL root file [default = %default]"),
parser.add_option("--dosmooth",action="store_true",dest="dosmth",
default=False,help="Smooth TH2 after filling and avg. [default = %default]"),
parser.add_option("--smth",action="store",dest="smthfnc",type="string",
default="k5a",help="Smoothing function to apply [default = %default]"),
(options, args) = parser.parse_args()
if options.model!="2HDM" and options.model!="BARY":
print "Model "+options.model+" is NOT a valid option."
sys.exit()
if options.which!="gg" and options.which!="tt" and options.which!="combo":
print "Channel "+options.which+" is NOT a valid option."
sys.exit()
#run
print "Making 2D limit plot for: "+options.model+" "+options.which
if options.do90: print "Using 90CL limits"
if options.dowgt: print "Weighting by 1/xsec"
if options.dosmth: print "Smoothing applied"
run(options) | identifier_body |
plot2DlimitsAll.py | import os
import sys
import math
import ROOT
#from ROOT import TColor
from array import array
from optparse import OptionParser
from CMS_lumi import CMS_lumi
import plotting_interp as plot
import re
import json
import types
import numpy
def | (opts):
# --- read in options
model = opts.model
which = opts.which
outdir = opts.outdir
do90 = opts.do90
dowgt = opts.dowgt
dosmth = opts.dosmth
smthfnc = opts.smthfnc
#if dosmth: addtxt = '_smth'
# --- read in files
indir = '/eos/cms/store/group/phys_exotica/MonoHgg/MonoH-COMBO-2016/'+model+'_jsons/'
if dowgt: wfile = ''
else: wfile = '_weighted'
if do90: indir += which+'_'+model+wfile+'_results_90CL/'
else: indir += which+'_'+model+wfile+'_results/'
# --- options for plot averaging
doFillAvgLow = True # do averaging below mMed = 2*mDM line
doFillAvgHigh = True # do averaging above mMed = 2*mDM line
if model=="2HDM": doFillAvgLow = False
if model=="2HDM": doFillAvgHigh = False
doFillAvgRest = True # do averaging at line or average normally
doFillAvgAll = True # do averaging for all plots not just observed
# --- setup general style
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gStyle.SetOptStat(0)
plot.ModTDRStyle()
canv = ROOT.TCanvas()
canv.SetLogz()
canv.SetTicks()
canv.SetRightMargin(0.16) # allow enough space for z axis label
canv.cd()
# --- setup palette
ROOT.gStyle.SetPalette(57) # palette normal
InvertPalette() # palette inverted
ROOT.gStyle.SetNumberContours(255)
A=[]; Z=[]
# --- mass points
if model=="2HDM":
A=[300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675]
Z=[450,500,550,600,650,700,750,800,850,900,950,1000,1050,1100,1150,1200,1250,1300,1350,1400,1450,1500,1550,1600,1650,1700,1750,1800,1850,1900,1950]
if model=="BARY":
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,875,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,675,750,800,850,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
if model=="BARY" and which=='combo':
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,650,700,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
# --- binning for BARY model
# Y axis
BinningA = [0.5,1.5]
BinAAxis = [1.0,47.5]
for i in range(1, len(A)-1):
BinningA.append( (A[i] + A[i+1])/2.0 )
BinAAxis.append( (A[i] + A[i+1])/2.0 )
BinningA.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
BinAAxis.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
# X axis
BinningZ = [9,11]
BinZAxis = [10,75]
for i in range(1, len(Z)-1):
BinningZ.append( (Z[i] + Z[i+1])/2.0 )
BinZAxis.append( (Z[i] + Z[i+1])/2.0 )
BinningZ.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
BinZAxis.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
# --- setup histograms (different models have different binning)
if model=="2HDM":
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z), Z[0], Z[-1]+50, len(A), A[0], A[-1]+25)
limitPlot = ROOT.TH2F("lplot", "lplot", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
if model=="BARY": # variable binning
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z)-1, array('d',BinZAxis), len(A)-1, array('d',BinAAxis))
limitPlot = ROOT.TH2F("lplot", "lplot", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
# --- read in json files
for a in A:
for z in Z:
data = {}
filename = indir+'Zprime'+str(z)+'A'+str(a)+'.json'
if which=='gg' and model=='BARY': # BARY gg ONLY has DM instead of A in filename
filename = indir+'Zprime'+str(z)+'DM'+str(a)+'.json'
scale = 1.
if dowgt: scale = scaleXS(model,z,a)
if os.path.isfile(filename) and scale != "99999":
with open(filename) as jsonfile:
data = json.load(jsonfile)
for key in data: # fill plots from json
limitPlot.SetBinContent(limitPlot.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp0'])
limitPlotUp.SetBinContent(limitPlotUp.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+1'])
limitPlotDown.SetBinContent(limitPlotDown.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-1'])
limitPlotUp2.SetBinContent(limitPlotUp2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+2'])
limitPlotDown2.SetBinContent(limitPlotDown2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-2'])
limitPlotObs.SetBinContent(limitPlotObs.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'obs'])
# --- average plots to make smooth contours
fillAvg(limitPlotObs, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotObs, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotObs, A, Z, False, False, doFillAvgRest)
if doFillAvgAll:
fillAvg(limitPlot, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlot, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlot, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp2, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown2, A, Z, False, False, doFillAvgRest)
# --- axis labels
limitPlotAxis.GetXaxis().SetTitle("m_{Z'} [GeV]")
limitPlotObs.GetZaxis().SetTitle("#sigma_{95% CL}/#sigma_{th}")
if model=="2HDM": limitPlotAxis.GetYaxis().SetTitle("m_{A} [GeV]")
if model=="BARY": limitPlotAxis.GetYaxis().SetTitle("m_{DM} [GeV]")
# --- clone obs to get contour
limitPlotObsCopy = limitPlotObs.Clone()
# --- set up min and max of z axis
limitPlotObs.SetMaximum(100)
limitPlotObs.SetMinimum(0.3)
# --- set range of x and y axis
if model=="BARY": limitPlotObs.GetXaxis().SetRangeUser(10,2001)
if model=="BARY": limitPlotObs.GetYaxis().SetRangeUser(1,1001)
if model=="2HDM": limitPlotObs.GetXaxis().SetRangeUser(450,2000)
if model=="2HDM": limitPlotObs.GetYaxis().SetRangeUser(300,700)
# --- style plot
limitPlotObs.GetYaxis().SetTitleOffset(0.95) # format axis labels
limitPlotObs.GetZaxis().SetTitleOffset(0.95)
limitPlotObs.GetXaxis().SetLabelSize(0.035) # format axis ticks
limitPlotObs.GetYaxis().SetLabelSize(0.035)
if model=="2HDM": limitPlotAxis.GetXaxis().SetNdivisions(9)
if model=="2HDM": limitPlotAxis.GetYaxis().SetNdivisions(8)
if model=="BARY": limitPlotAxis.GetXaxis().SetNdivisions(10)
if model=="BARY": limitPlotAxis.GetYaxis().SetNdivisions(16)
# --- smooth
if dosmth:
limitPlot.GetXaxis().SetRange(3,limitPlot.GetNbinsX())
limitPlot.Smooth(1,smthfnc)
limitPlotObsCopy.GetXaxis().SetRange(3,limitPlotObsCopy.GetNbinsX())
limitPlotObsCopy.Smooth(1,smthfnc)
limitPlotUp.GetXaxis().SetRange(3,limitPlotUp.GetNbinsX())
limitPlotUp.Smooth(1,smthfnc)
limitPlotDown.GetXaxis().SetRange(3,limitPlotDown.GetNbinsX())
limitPlotDown.Smooth(1,smthfnc)
limitPlot.GetXaxis().SetRange(0,limitPlot.GetNbinsX())
limitPlotObsCopy.GetXaxis().SetRange(0,limitPlotObsCopy.GetNbinsX())
limitPlotUp.GetXaxis().SetRange(0,limitPlotUp.GetNbinsX())
limitPlotDown.GetXaxis().SetRange(0,limitPlotDown.GetNbinsX())
#limitPlot.Smooth(1,smthfnc)
#limitPlotObsCopy.Smooth(1,smthfnc)
#limitPlotUp.Smooth(1,smthfnc)
#limitPlotDown.Smooth(1,smthfnc)
# --- get and style each contour
# 1 sigma up
limitPlotUp.SetMinimum(1)
limitPlotUp.SetContour(1)
limitPlotUp.SetLineWidth(1)
# 1 sigma down
limitPlotDown.SetMinimum(1)
limitPlotDown.SetContour(1)
limitPlotDown.SetLineWidth(1)
# observed
limitPlotObs.SetLineWidth(3)
limitPlotObs.SetLineColor(2)
limitPlotObsCopy.SetMinimum(1)
limitPlotObsCopy.SetContour(1)
limitPlotObsCopy.SetLineWidth(3)
limitPlotObsCopy.SetLineColor(2)
# expected
limitPlot.SetMinimum(1)
limitPlot.SetContour(1)
limitPlot.SetLineStyle(7)
limitPlot.SetLineWidth(3)
# --- draw plots
limitPlotAxis.Draw("COLZ")
limitPlotObs.Draw("COLZ SAME")
limitPlotUp.Draw("CONT3 SAME")
limitPlotDown.Draw("CONT3 SAME")
limitPlotObsCopy.Draw("CONT3 SAME")
limitPlot.Draw("CONT3 SAME")
# --- legend and extra text box
x1 = 0.18
y1 = 0.55
x2 = x1+0.42
y2 = y1+0.35
# --- add white box below bary
white = ROOT.TPaveText(x1,y1,x2,y2,"NDC")
white.AddText("")
white.SetFillColor(0)
white.Draw("SAME")
# --- latex
if model=="2HDM": txt1 = "#bf{Z'-2HDM}"
if model=="BARY": txt1 = "#bf{Baryonic Z'}"
txt1 += "#bf{, Z' #rightarrow DM + h"
if which=='gg': txt1 += "(#gamma#gamma)}"
if which=='tt': txt1 += "(#tau#tau)} "
if which=='combo': txt1 += "(#gamma#gamma + #tau#tau)}"
if model=="2HDM": txt2 = "#bf{Dirac DM, m_{DM} = 100 GeV}"
if model=="BARY": txt2 = "#bf{Dirac DM, g_{q} = 0.25, g_{DM} = 1.0 }"
if model=="2HDM": txt3 = "#bf{g_{Z'} = 0.8, g_{DM} = 1.0}"
if model=="BARY": txt3 = ""
txt = ROOT.TPaveText(x1,y1+0.15,x2,y2,"NDC")
txt.AddText(txt1)
txt.AddText(txt2)
txt.AddText(txt3)
txt.SetTextAlign(12)
txt.SetTextSize(0.04)
txt.Draw("SAME")
# --- legend
if model=="2HDM": leg = ROOT.TLegend(x1,y1,x2,y1+0.15)
if model=="BARY": leg = ROOT.TLegend(x1,y1+0.05,x2,y1+0.2)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.040)
leg.AddEntry(limitPlotObs,"Observed 95% CL","L")
leg.AddEntry(limitPlot,"Expected 95% CL","L")
leg.AddEntry(limitPlotUp,"#pm 1 s.d.","L")
leg.Draw()
canv.cd()
canv.Update()
CMS_lumi(canv,4,0)
canv.RedrawAxis()
canv.Update()
# --- save
outname = outdir+'contours_'
if do90: outname += '90CL'
else: outname += '95CL'
outname += '_'+model+'_'+which+'.root'
outfile = ROOT.TFile(outname,'RECREATE')
outfile.cd()
limitPlot.Write()
limitPlotObs.Write()
if do90:
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.png')
else:
canv.Print(outdir+'limits2D_'+model+'_'+which+'.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'.png')
def fillAvg(limitPlot,A,Z,doL,doH,doR):
# --- ordering for each option
irange = range(1,limitPlot.GetNbinsY()+1)
jrange = range(1,limitPlot.GetNbinsX()+1)
if doL: jrange = list(reversed(range(1,limitPlot.GetNbinsX()+1)))
if doH: irange = list(reversed(range(1,limitPlot.GetNbinsY()+1)))
# --- average over 4 adjacent bins
for i in irange:
for j in jrange:
aVal = A[i-1]
zVal = Z[j-1]
binVal = str(limitPlot.GetBinContent(j,i))
# --- only if bin is 0 do averaging
if binVal == "0.0" and ((doL and 2*float(aVal) < float(zVal)) or (doH and 2*float(aVal) > float(zVal)) or (doR)):
avg = 0.0
div = 0.0
back = limitPlot.GetBinContent(j-1,i)
forw = limitPlot.GetBinContent(j+1,i)
down = limitPlot.GetBinContent(j,i-1)
abov = limitPlot.GetBinContent(j,i+1)
if back != 0.0 and ((doL and back < 50.) or (doH and back > 50.) or (doR)):
avg += back
div += 1
if forw != 0.0 and ((doL and forw < 50.) or (doH and forw > 50.) or (doR)):
avg += forw
div += 1
if down != 0.0 and ((doL and down < 50.) or (doH and down > 50.) or (doR)):
avg += down
div += 1
if abov != 0.0 and ((doL and abov < 50.) or (doH and abov > 50.) or (doR)):
avg += abov
div += 1
if div != 0:
avg = avg/div
limitPlot.SetBinContent(j,i,avg)
def InvertPalette():
# --- Function to make inverted kBird palette
alpha=1
stops = array('d', ([ 0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) )
red = array('d', ([ 0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) )
green = array('d', ([ 0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) )
blue = array('d', ([ 0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) )
invred = numpy.asarray(list(reversed(red)))
invgreen = numpy.asarray(list(reversed(green)))
invblue = numpy.asarray(list(reversed(blue)))
ROOT.TColor.CreateGradientColorTable(9, stops, invred, invgreen, invblue, 255, alpha);
def scaleXS(model,Z,A):
# --- Function to scale point by 1/xsec
if model=="2HDM": xsRef = open("crosssectionZp2HDM.txt")
if model=="BARY": xsRef = open("crosssectionZpBaryonic.txt")
returnString = "99999"
for line in xsRef:
if (str(line.split(' ')[0]) == str(Z) and str(line.split(' ')[1]) == str(A)):
returnString = str(1./float(line.split(' ')[2]))
print returnString
return returnString
def init():
# options
parser = OptionParser("usage: %prog [options]")
parser.add_option("-O",action="store",dest="outdir",type="string",
default="",help="Output directory [default = %default]"),
parser.add_option("-m",action="store",dest="model",type="string",
default="",help="Which model (2HDM or BARY)"),
parser.add_option("-w",action="store",dest="which",type="string",
default="",help="Which channel (gg, tt, combo)"),
parser.add_option("--dowgt",action="store_true",dest="dowgt",
default=False,help="Weight by 1/xsec (if not already done) [default = %default]"),
parser.add_option("--do90",action="store_true",dest="do90",
default=False,help="Store 90%CL root file [default = %default]"),
parser.add_option("--dosmooth",action="store_true",dest="dosmth",
default=False,help="Smooth TH2 after filling and avg. [default = %default]"),
parser.add_option("--smth",action="store",dest="smthfnc",type="string",
default="k5a",help="Smoothing function to apply [default = %default]"),
(options, args) = parser.parse_args()
if options.model!="2HDM" and options.model!="BARY":
print "Model "+options.model+" is NOT a valid option."
sys.exit()
if options.which!="gg" and options.which!="tt" and options.which!="combo":
print "Channel "+options.which+" is NOT a valid option."
sys.exit()
#run
print "Making 2D limit plot for: "+options.model+" "+options.which
if options.do90: print "Using 90CL limits"
if options.dowgt: print "Weighting by 1/xsec"
if options.dosmth: print "Smoothing applied"
run(options)
if __name__=="__main__":
init()
| run | identifier_name |
plot2DlimitsAll.py | import os
import sys
import math
import ROOT
#from ROOT import TColor
from array import array
from optparse import OptionParser
from CMS_lumi import CMS_lumi
import plotting_interp as plot
import re
import json
import types
import numpy
def run(opts):
# --- read in options
model = opts.model
which = opts.which
outdir = opts.outdir
do90 = opts.do90
dowgt = opts.dowgt
dosmth = opts.dosmth
smthfnc = opts.smthfnc
#if dosmth: addtxt = '_smth'
# --- read in files
indir = '/eos/cms/store/group/phys_exotica/MonoHgg/MonoH-COMBO-2016/'+model+'_jsons/'
if dowgt: wfile = ''
else: wfile = '_weighted'
if do90: indir += which+'_'+model+wfile+'_results_90CL/'
else: indir += which+'_'+model+wfile+'_results/'
# --- options for plot averaging
doFillAvgLow = True # do averaging below mMed = 2*mDM line
doFillAvgHigh = True # do averaging above mMed = 2*mDM line
if model=="2HDM": doFillAvgLow = False
if model=="2HDM": doFillAvgHigh = False
doFillAvgRest = True # do averaging at line or average normally
doFillAvgAll = True # do averaging for all plots not just observed
# --- setup general style
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gStyle.SetOptStat(0)
plot.ModTDRStyle()
canv = ROOT.TCanvas()
canv.SetLogz()
canv.SetTicks()
canv.SetRightMargin(0.16) # allow enough space for z axis label
canv.cd()
# --- setup palette
ROOT.gStyle.SetPalette(57) # palette normal
InvertPalette() # palette inverted
ROOT.gStyle.SetNumberContours(255)
A=[]; Z=[]
# --- mass points
if model=="2HDM":
A=[300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675]
Z=[450,500,550,600,650,700,750,800,850,900,950,1000,1050,1100,1150,1200,1250,1300,1350,1400,1450,1500,1550,1600,1650,1700,1750,1800,1850,1900,1950]
if model=="BARY":
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,875,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,675,750,800,850,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
if model=="BARY" and which=='combo':
A=[1,35,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500,525,550,575,600,625,650,675,700,725,750,775,800,825,850,900,925,950,975,1000]
Z=[10,50,100,200,250,300,350,400,450,500,550,600,650,700,900,950,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000]
# --- binning for BARY model
# Y axis
BinningA = [0.5,1.5]
BinAAxis = [1.0,47.5]
for i in range(1, len(A)-1):
BinningA.append( (A[i] + A[i+1])/2.0 )
BinAAxis.append( (A[i] + A[i+1])/2.0 )
BinningA.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
BinAAxis.append( (A[-1] + A[-1] - ((A[-1] + A[-2])/2.0)) )
# X axis
BinningZ = [9,11]
BinZAxis = [10,75]
for i in range(1, len(Z)-1):
BinningZ.append( (Z[i] + Z[i+1])/2.0 )
BinZAxis.append( (Z[i] + Z[i+1])/2.0 )
BinningZ.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
BinZAxis.append( (Z[-1] + Z[-1] - ((Z[-1] + Z[-2])/2.0)) )
# --- setup histograms (different models have different binning)
if model=="2HDM":
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z), Z[0], Z[-1]+50, len(A), A[0], A[-1]+25)
limitPlot = ROOT.TH2F("lplot", "lplot", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(Z), Z[0]-25, Z[-1]+50, len(A), A[0]-12.5, A[-1]+25)
if model=="BARY": # variable binning
limitPlotAxis = ROOT.TH2F("lplotAxis", "lplotAxis", len(Z)-1, array('d',BinZAxis), len(A)-1, array('d',BinAAxis))
limitPlot = ROOT.TH2F("lplot", "lplot", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotObs = ROOT.TH2F("lplotObs", "lplotObs", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp = ROOT.TH2F("lplotU", "lplotU", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown = ROOT.TH2F("lplotDown", "lplotDown", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotUp2 = ROOT.TH2F("lplotU2", "lplotU2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
limitPlotDown2 = ROOT.TH2F("lplotDown2", "lplotDown2", len(BinningZ)-1, array('d',BinningZ), len(BinningA)-1, array('d',BinningA))
# --- read in json files
for a in A:
|
# --- average plots to make smooth contours
fillAvg(limitPlotObs, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotObs, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotObs, A, Z, False, False, doFillAvgRest)
if doFillAvgAll:
fillAvg(limitPlot, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotUp2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlotDown2, A, Z, doFillAvgLow, False, False)
fillAvg(limitPlot, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotUp2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlotDown2, A, Z, False, doFillAvgHigh, False)
fillAvg(limitPlot, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotUp2, A, Z, False, False, doFillAvgRest)
fillAvg(limitPlotDown2, A, Z, False, False, doFillAvgRest)
# --- axis labels
limitPlotAxis.GetXaxis().SetTitle("m_{Z'} [GeV]")
limitPlotObs.GetZaxis().SetTitle("#sigma_{95% CL}/#sigma_{th}")
if model=="2HDM": limitPlotAxis.GetYaxis().SetTitle("m_{A} [GeV]")
if model=="BARY": limitPlotAxis.GetYaxis().SetTitle("m_{DM} [GeV]")
# --- clone obs to get contour
limitPlotObsCopy = limitPlotObs.Clone()
# --- set up min and max of z axis
limitPlotObs.SetMaximum(100)
limitPlotObs.SetMinimum(0.3)
# --- set range of x and y axis
if model=="BARY": limitPlotObs.GetXaxis().SetRangeUser(10,2001)
if model=="BARY": limitPlotObs.GetYaxis().SetRangeUser(1,1001)
if model=="2HDM": limitPlotObs.GetXaxis().SetRangeUser(450,2000)
if model=="2HDM": limitPlotObs.GetYaxis().SetRangeUser(300,700)
# --- style plot
limitPlotObs.GetYaxis().SetTitleOffset(0.95) # format axis labels
limitPlotObs.GetZaxis().SetTitleOffset(0.95)
limitPlotObs.GetXaxis().SetLabelSize(0.035) # format axis ticks
limitPlotObs.GetYaxis().SetLabelSize(0.035)
if model=="2HDM": limitPlotAxis.GetXaxis().SetNdivisions(9)
if model=="2HDM": limitPlotAxis.GetYaxis().SetNdivisions(8)
if model=="BARY": limitPlotAxis.GetXaxis().SetNdivisions(10)
if model=="BARY": limitPlotAxis.GetYaxis().SetNdivisions(16)
# --- smooth
if dosmth:
limitPlot.GetXaxis().SetRange(3,limitPlot.GetNbinsX())
limitPlot.Smooth(1,smthfnc)
limitPlotObsCopy.GetXaxis().SetRange(3,limitPlotObsCopy.GetNbinsX())
limitPlotObsCopy.Smooth(1,smthfnc)
limitPlotUp.GetXaxis().SetRange(3,limitPlotUp.GetNbinsX())
limitPlotUp.Smooth(1,smthfnc)
limitPlotDown.GetXaxis().SetRange(3,limitPlotDown.GetNbinsX())
limitPlotDown.Smooth(1,smthfnc)
limitPlot.GetXaxis().SetRange(0,limitPlot.GetNbinsX())
limitPlotObsCopy.GetXaxis().SetRange(0,limitPlotObsCopy.GetNbinsX())
limitPlotUp.GetXaxis().SetRange(0,limitPlotUp.GetNbinsX())
limitPlotDown.GetXaxis().SetRange(0,limitPlotDown.GetNbinsX())
#limitPlot.Smooth(1,smthfnc)
#limitPlotObsCopy.Smooth(1,smthfnc)
#limitPlotUp.Smooth(1,smthfnc)
#limitPlotDown.Smooth(1,smthfnc)
# --- get and style each contour
# 1 sigma up
limitPlotUp.SetMinimum(1)
limitPlotUp.SetContour(1)
limitPlotUp.SetLineWidth(1)
# 1 sigma down
limitPlotDown.SetMinimum(1)
limitPlotDown.SetContour(1)
limitPlotDown.SetLineWidth(1)
# observed
limitPlotObs.SetLineWidth(3)
limitPlotObs.SetLineColor(2)
limitPlotObsCopy.SetMinimum(1)
limitPlotObsCopy.SetContour(1)
limitPlotObsCopy.SetLineWidth(3)
limitPlotObsCopy.SetLineColor(2)
# expected
limitPlot.SetMinimum(1)
limitPlot.SetContour(1)
limitPlot.SetLineStyle(7)
limitPlot.SetLineWidth(3)
# --- draw plots
limitPlotAxis.Draw("COLZ")
limitPlotObs.Draw("COLZ SAME")
limitPlotUp.Draw("CONT3 SAME")
limitPlotDown.Draw("CONT3 SAME")
limitPlotObsCopy.Draw("CONT3 SAME")
limitPlot.Draw("CONT3 SAME")
# --- legend and extra text box
x1 = 0.18
y1 = 0.55
x2 = x1+0.42
y2 = y1+0.35
# --- add white box below bary
white = ROOT.TPaveText(x1,y1,x2,y2,"NDC")
white.AddText("")
white.SetFillColor(0)
white.Draw("SAME")
# --- latex
if model=="2HDM": txt1 = "#bf{Z'-2HDM}"
if model=="BARY": txt1 = "#bf{Baryonic Z'}"
txt1 += "#bf{, Z' #rightarrow DM + h"
if which=='gg': txt1 += "(#gamma#gamma)}"
if which=='tt': txt1 += "(#tau#tau)} "
if which=='combo': txt1 += "(#gamma#gamma + #tau#tau)}"
if model=="2HDM": txt2 = "#bf{Dirac DM, m_{DM} = 100 GeV}"
if model=="BARY": txt2 = "#bf{Dirac DM, g_{q} = 0.25, g_{DM} = 1.0 }"
if model=="2HDM": txt3 = "#bf{g_{Z'} = 0.8, g_{DM} = 1.0}"
if model=="BARY": txt3 = ""
txt = ROOT.TPaveText(x1,y1+0.15,x2,y2,"NDC")
txt.AddText(txt1)
txt.AddText(txt2)
txt.AddText(txt3)
txt.SetTextAlign(12)
txt.SetTextSize(0.04)
txt.Draw("SAME")
# --- legend
if model=="2HDM": leg = ROOT.TLegend(x1,y1,x2,y1+0.15)
if model=="BARY": leg = ROOT.TLegend(x1,y1+0.05,x2,y1+0.2)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.040)
leg.AddEntry(limitPlotObs,"Observed 95% CL","L")
leg.AddEntry(limitPlot,"Expected 95% CL","L")
leg.AddEntry(limitPlotUp,"#pm 1 s.d.","L")
leg.Draw()
canv.cd()
canv.Update()
CMS_lumi(canv,4,0)
canv.RedrawAxis()
canv.Update()
# --- save
outname = outdir+'contours_'
if do90: outname += '90CL'
else: outname += '95CL'
outname += '_'+model+'_'+which+'.root'
outfile = ROOT.TFile(outname,'RECREATE')
outfile.cd()
limitPlot.Write()
limitPlotObs.Write()
if do90:
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'_90CL.png')
else:
canv.Print(outdir+'limits2D_'+model+'_'+which+'.pdf')
canv.Print(outdir+'limits2D_'+model+'_'+which+'.png')
def fillAvg(limitPlot,A,Z,doL,doH,doR):
# --- ordering for each option
irange = range(1,limitPlot.GetNbinsY()+1)
jrange = range(1,limitPlot.GetNbinsX()+1)
if doL: jrange = list(reversed(range(1,limitPlot.GetNbinsX()+1)))
if doH: irange = list(reversed(range(1,limitPlot.GetNbinsY()+1)))
# --- average over 4 adjacent bins
for i in irange:
for j in jrange:
aVal = A[i-1]
zVal = Z[j-1]
binVal = str(limitPlot.GetBinContent(j,i))
# --- only if bin is 0 do averaging
if binVal == "0.0" and ((doL and 2*float(aVal) < float(zVal)) or (doH and 2*float(aVal) > float(zVal)) or (doR)):
avg = 0.0
div = 0.0
back = limitPlot.GetBinContent(j-1,i)
forw = limitPlot.GetBinContent(j+1,i)
down = limitPlot.GetBinContent(j,i-1)
abov = limitPlot.GetBinContent(j,i+1)
if back != 0.0 and ((doL and back < 50.) or (doH and back > 50.) or (doR)):
avg += back
div += 1
if forw != 0.0 and ((doL and forw < 50.) or (doH and forw > 50.) or (doR)):
avg += forw
div += 1
if down != 0.0 and ((doL and down < 50.) or (doH and down > 50.) or (doR)):
avg += down
div += 1
if abov != 0.0 and ((doL and abov < 50.) or (doH and abov > 50.) or (doR)):
avg += abov
div += 1
if div != 0:
avg = avg/div
limitPlot.SetBinContent(j,i,avg)
def InvertPalette():
# --- Function to make inverted kBird palette
alpha=1
stops = array('d', ([ 0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) )
red = array('d', ([ 0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) )
green = array('d', ([ 0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) )
blue = array('d', ([ 0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) )
invred = numpy.asarray(list(reversed(red)))
invgreen = numpy.asarray(list(reversed(green)))
invblue = numpy.asarray(list(reversed(blue)))
ROOT.TColor.CreateGradientColorTable(9, stops, invred, invgreen, invblue, 255, alpha);
def scaleXS(model,Z,A):
# --- Function to scale point by 1/xsec
if model=="2HDM": xsRef = open("crosssectionZp2HDM.txt")
if model=="BARY": xsRef = open("crosssectionZpBaryonic.txt")
returnString = "99999"
for line in xsRef:
if (str(line.split(' ')[0]) == str(Z) and str(line.split(' ')[1]) == str(A)):
returnString = str(1./float(line.split(' ')[2]))
print returnString
return returnString
def init():
# options
parser = OptionParser("usage: %prog [options]")
parser.add_option("-O",action="store",dest="outdir",type="string",
default="",help="Output directory [default = %default]"),
parser.add_option("-m",action="store",dest="model",type="string",
default="",help="Which model (2HDM or BARY)"),
parser.add_option("-w",action="store",dest="which",type="string",
default="",help="Which channel (gg, tt, combo)"),
parser.add_option("--dowgt",action="store_true",dest="dowgt",
default=False,help="Weight by 1/xsec (if not already done) [default = %default]"),
parser.add_option("--do90",action="store_true",dest="do90",
default=False,help="Store 90%CL root file [default = %default]"),
parser.add_option("--dosmooth",action="store_true",dest="dosmth",
default=False,help="Smooth TH2 after filling and avg. [default = %default]"),
parser.add_option("--smth",action="store",dest="smthfnc",type="string",
default="k5a",help="Smoothing function to apply [default = %default]"),
(options, args) = parser.parse_args()
if options.model!="2HDM" and options.model!="BARY":
print "Model "+options.model+" is NOT a valid option."
sys.exit()
if options.which!="gg" and options.which!="tt" and options.which!="combo":
print "Channel "+options.which+" is NOT a valid option."
sys.exit()
#run
print "Making 2D limit plot for: "+options.model+" "+options.which
if options.do90: print "Using 90CL limits"
if options.dowgt: print "Weighting by 1/xsec"
if options.dosmth: print "Smoothing applied"
run(options)
if __name__=="__main__":
init()
| for z in Z:
data = {}
filename = indir+'Zprime'+str(z)+'A'+str(a)+'.json'
if which=='gg' and model=='BARY': # BARY gg ONLY has DM instead of A in filename
filename = indir+'Zprime'+str(z)+'DM'+str(a)+'.json'
scale = 1.
if dowgt: scale = scaleXS(model,z,a)
if os.path.isfile(filename) and scale != "99999":
with open(filename) as jsonfile:
data = json.load(jsonfile)
for key in data: # fill plots from json
limitPlot.SetBinContent(limitPlot.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp0'])
limitPlotUp.SetBinContent(limitPlotUp.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+1'])
limitPlotDown.SetBinContent(limitPlotDown.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-1'])
limitPlotUp2.SetBinContent(limitPlotUp2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp+2'])
limitPlotDown2.SetBinContent(limitPlotDown2.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'exp-2'])
limitPlotObs.SetBinContent(limitPlotObs.GetXaxis().FindBin(float(z)),limitPlot.GetYaxis().FindBin(float(a)),float(scale)*data[key][u'obs']) | conditional_block |
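# --- Editor's sketch (hedged, standalone; file layout assumed from the block above): each mass
# --- point's json maps a single key to the observed and expected limit quantiles, so the raw
# --- read-and-scale step can be factored out as:
import json
def read_limit_point(path, scale=1.0):
    # Return {quantile: scale * limit} for the single mass point stored at `path`.
    with open(path) as f:
        data = json.load(f)
    point = next(iter(data.values()))
    keys = ('exp-2', 'exp-1', 'exp0', 'exp+1', 'exp+2', 'obs')
    return dict((q, scale * point[q]) for q in keys if q in point)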
reidtools.py | from __future__ import absolute_import
from __future__ import print_function
__all__ = ['visualize_ranked_results', 'visactmap']
import numpy as np
import os
import os.path as osp
import shutil
import cv2
from PIL import Image
import torch
from torch.nn import functional as F
from .tools import mkdir_if_missing
GRID_SPACING = 10
QUERY_EXTRA_SPACING = 90
BW = 8 # border width
GREEN = (1, 215, 117)
RED = (111, 107, 241)
PAD_SPACING = 5
def visualize_ranked_results(distmat, dataset, data_type, width=128, height=256, save_dir='', topk=10):
"""Visualizes ranked results.
Supports both image-reid and video-reid.
For image-reid, ranks will be plotted in a single figure. For video-reid, ranks will be
saved in folders each containing a tracklet.
Args:
distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
dataset (tuple): a 2-tuple containing (query, gallery), each of which contains
tuples of (img_path(s), pid, camid).
data_type (str): "image" or "video".
width (int, optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def | (src, dst, rank, prefix, matched=False):
"""
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
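# --- Editor's sketch (hedged, not part of the original API): the core of the ranking above is a
# --- row-wise argsort of the distance matrix; a minimal standalone version, ignoring the
# --- same-camera filtering done in visualize_ranked_results(), would be:
def _rank_topk(distmat, q_pids, g_pids, topk=10):
    # distmat: (num_query, num_gallery) array; smaller distance means a better rank
    indices = np.argsort(distmat, axis=1)[:, :topk]
    matches = np.asarray(g_pids)[indices] == np.asarray(q_pids)[:, None]
    return indices, matches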
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
imgs = imgs.cuda()
contours = contours.cuda()
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only.')
if outputs.dim() != 4:
raise ValueError('The model output is supposed to have ' \
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(outputs.dim()))
# compute activation maps
outputs = (outputs ** 2).sum(1)
b, h, w = outputs.size()
outputs = outputs.view(b, h * w)
outputs = F.normalize(outputs, p=2, dim=1)
outputs = outputs.view(b, h, w)
if use_gpu:
imgs, outputs = imgs.cpu(), outputs.cpu()
for j in range(outputs.size(0)):
# get image name
path = paths[j]
# imname = osp.basename(osp.splitext(path)[0])
path = path.split('/')
imname = osp.splitext(path[-2] + '_' + path[-1])[0]
# RGB image
img = imgs[j, ...]
for t, m, s in zip(img, imagenet_mean, imagenet_std):
t.mul_(s).add_(m).clamp_(0, 1)
img_np = np.uint8(np.floor(img.numpy() * 255))
img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)
# activation map
am = outputs[j, ...].numpy()
am = cv2.resize(am, (width, height))
            am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)  # min-max scale to [0, 255]
am = np.uint8(np.floor(am))
am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
# overlapped
overlapped = img_np * 0.3 + am * 0.7
overlapped[overlapped > 255] = 255
overlapped = overlapped.astype(np.uint8)
# # save images in a single figure (add white spacing between images)
# # from left to right: original image, activation map, overlapped image
# grid_img = 255 * np.ones((height, 3 * width + 2 * GRID_SPACING, 3), dtype=np.uint8)
# grid_img[:, :width, :] = img_np[:, :, ::-1]
# grid_img[:, width + GRID_SPACING: 2 * width + GRID_SPACING, :] = am
# grid_img[:, 2 * width + 2 * GRID_SPACING:, :] = overlapped
cv2.imwrite(osp.join(actmap_dir, imname+'_ori.jpg'), img_np[:, :, ::-1])
cv2.imwrite(osp.join(actmap_dir, imname + '_am.jpg'), am)
cv2.imwrite(osp.join(actmap_dir, imname + '_overlap.jpg'), overlapped)
if (batch_idx + 1) % print_freq == 0:
print('- done batch {}/{}'.format(batch_idx + 1, len(testloader)))
| _cp_img_to | identifier_name |
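# --- Editor's sketch (hedged, standalone): the activation map built inside visactmap() above is
# --- the channel-wise sum of squares of the last convolutional feature map, L2-normalised per image:
import torch.nn.functional as F
def _activation_map(featmap):
    # featmap: (b, c, h, w) tensor of convolutional features
    am = (featmap ** 2).sum(1)  # (b, h, w)
    b, h, w = am.size()
    am = F.normalize(am.view(b, h * w), p=2, dim=1)
    return am.view(b, h, w)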
reidtools.py | from __future__ import absolute_import
from __future__ import print_function
__all__ = ['visualize_ranked_results', 'visactmap']
import numpy as np
import os
import os.path as osp
import shutil
import cv2
from PIL import Image
import torch
from torch.nn import functional as F
from .tools import mkdir_if_missing
GRID_SPACING = 10
QUERY_EXTRA_SPACING = 90
BW = 8 # border width
GREEN = (1, 215, 117)
RED = (111, 107, 241)
PAD_SPACING = 5
def visualize_ranked_results(distmat, dataset, data_type, width=128, height=256, save_dir='', topk=10):
"""Visualizes ranked results.
Supports both image-reid and video-reid.
For image-reid, ranks will be plotted in a single figure. For video-reid, ranks will be
saved in folders each containing a tracklet.
Args:
distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
dataset (tuple): a 2-tuple containing (query, gallery), each of which contains
tuples of (img_path(s), pid, camid).
data_type (str): "image" or "video".
width (int, optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def _cp_img_to(src, dst, rank, prefix, matched=False):
|
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
imgs = imgs.cuda()
contours = contours.cuda()
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only.')
if outputs.dim() != 4:
raise ValueError('The model output is supposed to have ' \
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(outputs.dim()))
# compute activation maps
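# activation energy per location: sum of squared channel responses, then L2-normalized over the spatial grid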
outputs = (outputs ** 2).sum(1)
b, h, w = outputs.size()
outputs = outputs.view(b, h * w)
outputs = F.normalize(outputs, p=2, dim=1)
outputs = outputs.view(b, h, w)
if use_gpu:
imgs, outputs = imgs.cpu(), outputs.cpu()
for j in range(outputs.size(0)):
# get image name
path = paths[j]
# imname = osp.basename(osp.splitext(path)[0])
path = path.split('/')
imname = osp.splitext(path[-2] + '_' + path[-1])[0]
# RGB image
img = imgs[j, ...]
for t, m, s in zip(img, imagenet_mean, imagenet_std):
t.mul_(s).add_(m).clamp_(0, 1)
img_np = np.uint8(np.floor(img.numpy() * 255))
img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)
# activation map
am = outputs[j, ...].numpy()
am = cv2.resize(am, (width, height))
am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)
am = np.uint8(np.floor(am))
am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
# overlapped
overlapped = img_np * 0.3 + am * 0.7
overlapped[overlapped > 255] = 255
overlapped = overlapped.astype(np.uint8)
# # save images in a single figure (add white spacing between images)
# # from left to right: original image, activation map, overlapped image
# grid_img = 255 * np.ones((height, 3 * width + 2 * GRID_SPACING, 3), dtype=np.uint8)
# grid_img[:, :width, :] = img_np[:, :, ::-1]
# grid_img[:, width + GRID_SPACING: 2 * width + GRID_SPACING, :] = am
# grid_img[:, 2 * width + 2 * GRID_SPACING:, :] = overlapped
cv2.imwrite(osp.join(actmap_dir, imname+'_ori.jpg'), img_np[:, :, ::-1])
cv2.imwrite(osp.join(actmap_dir, imname + '_am.jpg'), am)
cv2.imwrite(osp.join(actmap_dir, imname + '_overlap.jpg'), overlapped)
if (batch_idx + 1) % print_freq == 0:
print('- done batch {}/{}'.format(batch_idx + 1, len(testloader)))
| """
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst) | identifier_body |
reidtools.py | from __future__ import absolute_import
from __future__ import print_function
__all__ = ['visualize_ranked_results', 'visactmap']
import numpy as np
import os
import os.path as osp
import shutil
import cv2
from PIL import Image
import torch
from torch.nn import functional as F
from .tools import mkdir_if_missing
GRID_SPACING = 10
QUERY_EXTRA_SPACING = 90
BW = 8 # border width
GREEN = (1, 215, 117)
RED = (111, 107, 241)
PAD_SPACING = 5
def visualize_ranked_results(distmat, dataset, data_type, width=128, height=256, save_dir='', topk=10):
"""Visualizes ranked results.
Supports both image-reid and video-reid.
For image-reid, ranks will be plotted in a single figure. For video-reid, ranks will be
saved in folders each containing a tracklet.
Args:
distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
dataset (tuple): a 2-tuple containing (query, gallery), each of which contains
tuples of (img_path(s), pid, camid).
data_type (str): "image" or "video".
width (int, optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def _cp_img_to(src, dst, rank, prefix, matched=False):
"""
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
|
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only.')
if outputs.dim() != 4:
raise ValueError('The model output is supposed to have ' \
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(outputs.dim()))
# compute activation maps
outputs = (outputs ** 2).sum(1)
b, h, w = outputs.size()
outputs = outputs.view(b, h * w)
outputs = F.normalize(outputs, p=2, dim=1)
outputs = outputs.view(b, h, w)
if use_gpu:
imgs, outputs = imgs.cpu(), outputs.cpu()
for j in range(outputs.size(0)):
# get image name
path = paths[j]
# imname = osp.basename(osp.splitext(path)[0])
path = path.split('/')
imname = osp.splitext(path[-2] + '_' + path[-1])[0]
# RGB image
img = imgs[j, ...]
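# undo the ImageNet normalization (x * std + mean) and clamp to [0, 1] before converting to uint8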
for t, m, s in zip(img, imagenet_mean, imagenet_std):
t.mul_(s).add_(m).clamp_(0, 1)
img_np = np.uint8(np.floor(img.numpy() * 255))
img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)
# activation map
am = outputs[j, ...].numpy()
am = cv2.resize(am, (width, height))
am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)
am = np.uint8(np.floor(am))
am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
# overlapped
overlapped = img_np * 0.3 + am * 0.7
overlapped[overlapped > 255] = 255
overlapped = overlapped.astype(np.uint8)
# # save images in a single figure (add white spacing between images)
# # from left to right: original image, activation map, overlapped image
# grid_img = 255 * np.ones((height, 3 * width + 2 * GRID_SPACING, 3), dtype=np.uint8)
# grid_img[:, :width, :] = img_np[:, :, ::-1]
# grid_img[:, width + GRID_SPACING: 2 * width + GRID_SPACING, :] = am
# grid_img[:, 2 * width + 2 * GRID_SPACING:, :] = overlapped
cv2.imwrite(osp.join(actmap_dir, imname+'_ori.jpg'), img_np[:, :, ::-1])
cv2.imwrite(osp.join(actmap_dir, imname + '_am.jpg'), am)
cv2.imwrite(osp.join(actmap_dir, imname + '_overlap.jpg'), overlapped)
if (batch_idx + 1) % print_freq == 0:
print('- done batch {}/{}'.format(batch_idx + 1, len(testloader)))
| imgs = imgs.cuda()
contours = contours.cuda() | conditional_block |
reidtools.py | from __future__ import absolute_import
from __future__ import print_function
__all__ = ['visualize_ranked_results', 'visactmap']
import numpy as np
import os
import os.path as osp
import shutil
import cv2
from PIL import Image
import torch
from torch.nn import functional as F
from .tools import mkdir_if_missing
GRID_SPACING = 10
QUERY_EXTRA_SPACING = 90
BW = 8 # border width
GREEN = (1, 215, 117)
RED = (111, 107, 241)
PAD_SPACING = 5
def visualize_ranked_results(distmat, dataset, data_type, width=128, height=256, save_dir='', topk=10):
"""Visualizes ranked results.
Supports both image-reid and video-reid.
For image-reid, ranks will be plotted in a single figure. For video-reid, ranks will be
saved in folders each containing a tracklet.
Args:
distmat (numpy.ndarray): distance matrix of shape (num_query, num_gallery).
dataset (tuple): a 2-tuple containing (query, gallery), each of which contains
tuples of (img_path(s), pid, camid).
data_type (str): "image" or "video".
width (int, optional): resized image width. Default is 128.
height (int, optional): resized image height. Default is 256.
save_dir (str): directory to save output images.
topk (int, optional): denoting top-k images in the rank list to be visualized.
Default is 10.
"""
num_q, num_g = distmat.shape
mkdir_if_missing(save_dir)
print('# query: {}\n# gallery {}'.format(num_q, num_g))
print('Visualizing top-{} ranks ...'.format(topk))
query, gallery = dataset
assert num_q == len(query)
assert num_g == len(gallery)
indices = np.argsort(distmat, axis=1)
def _cp_img_to(src, dst, rank, prefix, matched=False):
"""
Args:
src: image path or tuple (for vidreid)
dst: target directory
rank: int, denoting ranked position, starting from 1
prefix: string
matched: bool
"""
if isinstance(src, tuple) or isinstance(src, list):
if prefix == 'gallery':
suffix = 'TRUE' if matched else 'FALSE'
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3)) + '_' + suffix
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))
mkdir_if_missing(dst)
for img_path in src:
shutil.copy(img_path, dst)
else:
dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3) + '_name_' + osp.basename(src))
shutil.copy(src, dst)
for q_idx in range(num_q):
item = query[q_idx]
qimg_path, qpid, qcamid = item[:3]
# qsegment_path = item[6]
qpid, qcamid = int(qpid), int(qcamid)
num_cols = topk + 1
# grid_img = 255 * np.ones((2*height+GRID_SPACING, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
grid_img = 255 * np.ones((height, num_cols*width+(topk-1)*GRID_SPACING+QUERY_EXTRA_SPACING, 3), dtype=np.uint8)
idx_str = str(qpid) + '\n'
if data_type == 'image':
qimg = cv2.imread(qimg_path)
qimg = Image.fromarray(cv2.cvtColor(qimg, cv2.COLOR_BGR2RGB))
qimg = cv2.cvtColor(np.asarray(qimg), cv2.COLOR_RGB2BGR)
qimg = cv2.resize(qimg, (width, height))
qimg = cv2.copyMakeBorder(qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
qimg = cv2.resize(qimg, (width, height)) # resize twice to ensure that the border width is consistent across images
# qsegment = cv2.imread(qsegment_path)
# qsegment = Image.fromarray(cv2.cvtColor(qsegment, cv2.COLOR_BGR2RGB))
# qsegment = cv2.cvtColor(np.asarray(qsegment), cv2.COLOR_RGB2BGR)
#
# qsegment = cv2.resize(qsegment, (width, height))
# qsegment = cv2.copyMakeBorder(qsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0))
# qsegment = cv2.resize(qsegment, (
# width, height)) # resize twice to ensure that the border width is consistent across images
grid_img[:height, :width, :] = qimg
# grid_img[height+GRID_SPACING:, :width, :] = qsegment
else:
qdir = osp.join(save_dir, osp.basename(osp.splitext(qimg_path)[0]))
mkdir_if_missing(qdir)
_cp_img_to(qimg_path, qdir, rank=0, prefix='query')
rank_idx = 1
for g_idx in indices[q_idx, :]:
item = gallery[g_idx]
gimg_path, gpid, gcamid = item[:3]
# gsegment_path = item[6]
gpid, gcamid = int(gpid), int(gcamid)
invalid = (qpid == gpid) & (qcamid == gcamid)
idx_str = idx_str + str(gpid) + ' '
if not invalid:
matched = gpid == qpid
if data_type == 'image':
border_color = GREEN if matched else RED
gimg = cv2.imread(gimg_path)
gimg = Image.fromarray(cv2.cvtColor(gimg, cv2.COLOR_BGR2RGB))
gimg = cv2.cvtColor(np.asarray(gimg), cv2.COLOR_RGB2BGR)
gimg = cv2.resize(gimg, (width, height))
gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gimg = cv2.copyMakeBorder(gimg, BW, BW, BW, BW, 1, value=border_color)
gimg = cv2.resize(gimg, (width, height))
# gsegment = cv2.imread(gsegment_path)
# gsegment = Image.fromarray(cv2.cvtColor(gsegment, cv2.COLOR_BGR2RGB))
# gsegment = cv2.cvtColor(np.asarray(gsegment), cv2.COLOR_RGB2BGR)
#
# gsegment = cv2.resize(gsegment, (width, height))
# gsegment = cv2.copyMakeBorder(gsegment, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=border_color)
# gsegment = cv2.resize(gsegment, (width, height))
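# left edge of the rank_idx-th gallery tile; QUERY_EXTRA_SPACING keeps a wider gap after the query column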
start = rank_idx * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
end = (rank_idx + 1) * width + (rank_idx - 1) * GRID_SPACING + QUERY_EXTRA_SPACING
grid_img[:height, start:end, :] = gimg
# grid_img[height+GRID_SPACING:, start:end, :] = gsegment
else:
_cp_img_to(gimg_path, qdir, rank=rank_idx, prefix='gallery', matched=matched)
rank_idx += 1
if rank_idx > topk:
break
# if rank_idx > topk-1:
# break
relpath = qimg_path.split('/rgb/')[-1]
imname = osp.basename(osp.splitext(relpath)[0])
dirname = osp.dirname(relpath)
dir_path = osp.join(save_dir, dirname)
if not osp.exists(dir_path):
os.makedirs(dir_path)
cv2.imwrite(osp.join(dir_path, imname + '.jpg'), grid_img)
with open(osp.join(dir_path, imname + '.txt'), 'w') as fp:
fp.write(idx_str)
# imname = osp.basename(osp.splitext(qimg_path)[0])
# cv2.imwrite(osp.join(save_dir, imname+'.jpg'), grid_img)
if (q_idx + 1) % 100 == 0:
print('- done {}/{}'.format(q_idx + 1, num_q))
print('Done. Images have been saved to "{}" ...'.format(save_dir))
@torch.no_grad()
def visactmap(testloader, model, save_dir, width, height, print_freq, use_gpu, **kwargs):
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
This function takes as input the query images of target datasets
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
model.eval()
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_layer2')
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps ...')
for batch_idx, data in enumerate(testloader):
# imgs, paths = data[0], data[3]
# imgs, paths = data[0], data[3]
imgs, contours, paths = data[0], data[1], data[4]
if use_gpu:
imgs = imgs.cuda()
contours = contours.cuda()
# forward to get convolutional feature maps
try:
# outputs = model(segments, imgs, return_featuremaps=True)
outputs = model(imgs, contours, return_featuremaps=True)
except TypeError:
raise TypeError('forward() got unexpected keyword argument "return_featuremaps". ' \
'Please add return_featuremaps as an input argument to forward(). When ' \
'return_featuremaps=True, return feature maps only.')
if outputs.dim() != 4:
raise ValueError('The model output is supposed to have ' \
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(outputs.dim()))
| outputs = (outputs ** 2).sum(1)
b, h, w = outputs.size()
outputs = outputs.view(b, h * w)
outputs = F.normalize(outputs, p=2, dim=1)
outputs = outputs.view(b, h, w)
if use_gpu:
imgs, outputs = imgs.cpu(), outputs.cpu()
for j in range(outputs.size(0)):
# get image name
path = paths[j]
# imname = osp.basename(osp.splitext(path)[0])
path = path.split('/')
imname = osp.splitext(path[-2] + '_' + path[-1])[0]
# RGB image
img = imgs[j, ...]
for t, m, s in zip(img, imagenet_mean, imagenet_std):
t.mul_(s).add_(m).clamp_(0, 1)
img_np = np.uint8(np.floor(img.numpy() * 255))
img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)
# activation map
am = outputs[j, ...].numpy()
am = cv2.resize(am, (width, height))
am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) + 1e-12)
am = np.uint8(np.floor(am))
am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
# overlapped
overlapped = img_np * 0.3 + am * 0.7
overlapped[overlapped > 255] = 255
overlapped = overlapped.astype(np.uint8)
# # save images in a single figure (add white spacing between images)
# # from left to right: original image, activation map, overlapped image
# grid_img = 255 * np.ones((height, 3 * width + 2 * GRID_SPACING, 3), dtype=np.uint8)
# grid_img[:, :width, :] = img_np[:, :, ::-1]
# grid_img[:, width + GRID_SPACING: 2 * width + GRID_SPACING, :] = am
# grid_img[:, 2 * width + 2 * GRID_SPACING:, :] = overlapped
cv2.imwrite(osp.join(actmap_dir, imname+'_ori.jpg'), img_np[:, :, ::-1])
cv2.imwrite(osp.join(actmap_dir, imname + '_am.jpg'), am)
cv2.imwrite(osp.join(actmap_dir, imname + '_overlap.jpg'), overlapped)
if (batch_idx + 1) % print_freq == 0:
print('- done batch {}/{}'.format(batch_idx + 1, len(testloader))) | # compute activation maps | random_line_split |
mqtt.go | package mqttModule
import (
"github.com/eclipse/paho.mqtt.golang"
"fmt"
"time"
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"strconv"
"encoding/json"
"os"
"sync"
interfaces "github.com/gw123/GMQ/core/interfaces"
"github.com/gw123/GMQ/common/common_types"
)
const MaxMsgLen = 100
type MsgIds struct {
Ids []string
head int
tail int
}
func NewMsgIds() *MsgIds {
this := new(MsgIds)
this.head = 0
this.Ids = make([]string, MaxMsgLen)
return this
}
var msgIdsLock = sync.Mutex{}
func (msgIds *MsgIds) Check(msgId string) bool {
msgIdsLock.Lock()
defer msgIdsLock.Unlock()
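// linear scan of the fixed-size ring buffer: a known id means this message was already handled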
for _, id1 := range msgIds.Ids {
if id1 == msgId {
return false
}
}
if msgIds.head >= MaxMsgLen {
msgIds.head = 0
}
msgIds.Ids[msgIds.head] = msgId
msgIds.head++;
return true
}
var msgIds *MsgIds
func init() {
msgIds = NewMsgIds()
}
type Device struct {
DeviceName string
ProductKey string
DeviceSecret string
IsLogin bool
Status uint
LasUsedtime int64
}
type Iot struct {
Host string
DeviceName string
ProductKey string
ClientId string
Username string
Password string
Sign string
Conn mqtt.Client
logOut interfaces.ModuleLogger
App interfaces.App
SubDevices []Device
}
type Params struct {
ProductKey string
DeviceName string
DeviceSecret string
OnConnectHandler mqtt.OnConnectHandler
ConnectionLostHandler mqtt.ConnectionLostHandler
Logger interfaces.ModuleLogger
App interfaces.App
DefaultHandel mqtt.MessageHandler
}
func NewIot(params Params) (iot *Iot) {
iot = new(Iot)
iot.SubDevices = make([]Device, 0)
sign, timestamp := iot.GetSign(params.ProductKey, params.DeviceName, params.DeviceSecret)
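// Aliyun IoT MQTT credentials: clientId carries the sign method and timestamp, username is deviceName&productKey, password is the HMAC signature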
iot.Password = sign
iot.ClientId = params.DeviceName + "|securemode=3,signmethod=hmacsha1,timestamp=" + timestamp + "|"
iot.Username = params.DeviceName + "&" + params.ProductKey
iot.DeviceName = params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil | .Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId ,message.Payload())
this.App.Pub(event)
}
/***
* Subscribe to the device's get topic
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* Subscribe to a sub-device's get topic
*/
func (this *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* Register a sub-device
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* Add a sub-device to the gateway topology
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
return
})
}
/***
step [1, 100]; -1: upgrade failed, -2: download failed, -3: verification failed, -4: flashing failed
desc: progress information
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* Report the device firmware version
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* Sub-device login. This function keeps running, so call it with: go iot.PublishSubLogin
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"hmacSha1","cleanSession":"false"}}`
data = fmt.Sprintf(data, "ababab", subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login"
this.Publish(topic, 1, true, []byte(data))
}
/***
* Callback for the sub-device login reply
*/
func (this *Iot) SubscribeSubLoginReply() {
topic_reply := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg := common_types.LoginResponse{}
err := json.Unmarshal(message.Payload(), &msg)
if err != nil {
this.writeLog("error", "SubLogin_reply Json 解析失败"+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLogin_reply Json 登陆失败"+msg.Message)
return
}
this.writeLog("info", "SubLogin_reply "+msg.Data.DeviceName+" 登陆成功"+msg.Message)
/* subscribe to the sub-device topics */
this.SubscribeSubGet(msg.Data.ProductKey, msg.Data.DeviceName)
})
}
/***
* Log a sub-device out
*/
func (this *Iot) PublishSubLoginOut(subProductKey, subDeviceName string) {
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName)
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout"
topic_reply := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout_reply"
this.Publish(topic, 1, false, []byte(data))
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+err.Error())
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+msg.Message)
return
}
})
}
/***
* Compute the signature
*/
func (this *Iot) GetSign(productKey, deviceName, deviceSecret string) (string, string) {
timestamp := strconv.Itoa(int(time.Now().Unix()))
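// concatenate the request fields in a fixed order and sign them with HMAC-SHA1 keyed by the device secret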
str := "clientId" + deviceName + "deviceName" + deviceName + "productKey" + productKey + "timestamp" + timestamp;
key := []byte(deviceSecret)
hmacHandel := hmac.New(sha1.New, key)
hmacHandel.Write([]byte(str))
res := hmacHandel.Sum(nil)
return hex.EncodeToString(res), timestamp
}
/***
* Generate a unique message id
*/
func (this *Iot) getMsgId() string {
return strconv.Itoa(int(time.Now().UnixNano()))
}
func (this *Iot) SetLogOutPut(writer interfaces.ModuleLogger) {
this.logOut = writer
}
func (this *Iot) writeLog(logType, Content string) {
switch logType {
case "warning":
this.logOut.Warning(Content)
break;
case "info":
this.logOut.Info(Content)
break
case "error":
this.logOut.Error(Content)
break;
case "debug":
this.logOut.Debug(Content)
break
default:
this.logOut.Info(Content)
break
}
}
func (this *Iot) Write(data []byte) (int, error) {
if this.Conn.IsConnected() {
this.PublishRaw(data)
}
return 0, nil
}
var appendSubDevicesMutex = sync.Mutex{}
/***
* Add a sub-device to the local list
*/
func (this *Iot) AppendSubDevice(subProductKey, subDeviceName, subDeviceSecret string) (Device) {
subDevice := Device{}
subDevice.ProductKey = subProductKey
subDevice.DeviceName = subDeviceName
subDevice.DeviceSecret = subDeviceSecret
appendSubDevicesMutex.Lock()
this.SubDevices = append(this.SubDevices, subDevice)
appendSubDevicesMutex.Unlock()
return subDevice
}
func (this *Iot) PublishRaw(data []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
this.Publish(topic, 1, false, data)
}
func (this *Iot) PublishLog(log []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
type Log struct {
Timestamp int64 `json:"timestamp"`
Event string `json:"event"`
Data string `json:"data"`
}
logData := Log{}
logData.Timestamp = time.Now().Unix()
logData.Data = string(log)
logData.Event = "log"
data, err := json.Marshal(logData)
if err != nil {
return
}
this.Publish(topic, 1, false, data)
}
func (this *Iot) SyncUpgradeFile(data common_types.UpdateResponse) {
fileContent := `%s
%s
%s`
fileContent = fmt.Sprintf(fileContent, data.Data.Md5, data.Data.Url, data.Data.Version)
var file *os.File;
var err error;
flag := false
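// retry opening upgrade.plan a few times before giving up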
for i := 5; i > 0; i-- {
file, err = os.OpenFile("upgrade.plan", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660)
if err != nil {
continue
}
flag = true
time.Sleep(time.Second)
break
}
if !flag {
this.writeLog("error", "SyncUpgradeFile: "+err.Error())
return
}
defer file.Close()
file.Write([]byte(fileContent))
}
| {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds | conditional_block |
mqtt.go | package mqttModule
import (
"github.com/eclipse/paho.mqtt.golang"
"fmt"
"time"
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"strconv"
"encoding/json"
"os"
"sync"
interfaces "github.com/gw123/GMQ/core/interfaces"
"github.com/gw123/GMQ/common/common_types"
)
const MaxMsgLen = 100
type MsgIds struct {
Ids []string
head int
tail int
}
func NewMsgIds() *MsgIds {
this := new(MsgIds)
this.head = 0
this.Ids = make([]string, MaxMsgLen)
return this
}
var msgIdsLock = sync.Mutex{}
func (msgIds *MsgIds) Check(msgId string) bool {
msgIdsLock.Lock()
defer msgIdsLock.Unlock()
for _, id1 := range msgIds.Ids {
if id1 == msgId {
return false
}
}
if msgIds.head >= MaxMsgLen {
msgIds.head = 0
}
msgIds.Ids[msgIds.head] = msgId
msgIds.head++;
return true
}
var msgIds *MsgIds
func init() {
msgIds = NewMsgIds()
}
type Device struct {
DeviceName string
ProductKey string
DeviceSecret string
IsLogin bool
Status uint
LasUsedtime int64
}
type Iot struct {
Host string | ProductKey string
ClientId string
Username string
Password string
Sign string
Conn mqtt.Client
logOut interfaces.ModuleLogger
App interfaces.App
SubDevices []Device
}
type Params struct {
ProductKey string
DeviceName string
DeviceSecret string
OnConnectHandler mqtt.OnConnectHandler
ConnectionLostHandler mqtt.ConnectionLostHandler
Logger interfaces.ModuleLogger
App interfaces.App
DefaultHandel mqtt.MessageHandler
}
func NewIot(params Params) (iot *Iot) {
iot = new(Iot)
iot.SubDevices = make([]Device, 0)
sign, timestamp := iot.GetSign(params.ProductKey, params.DeviceName, params.DeviceSecret)
iot.Password = sign
iot.ClientId = params.DeviceName + "|securemode=3,signmethod=hmacsha1,timestamp=" + timestamp + "|"
iot.Username = params.DeviceName + "&" + params.ProductKey
iot.DeviceName = params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds.Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId ,message.Payload())
this.App.Pub(event)
}
/***
* Subscribe to the device's get topic
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* Subscribe to a sub-device's get topic
*/
func (this *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* Register a sub-device
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* Add a sub-device to the gateway topology
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
return
})
}
/***
step [1, 100]; -1: upgrade failed, -2: download failed, -3: verification failed, -4: flashing failed
desc: progress information
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* Report the device firmware version
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* Sub-device login. This function keeps running, so call it with: go iot.PublishSubLogin
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"hmacSha1","cleanSession":"false"}}`
data = fmt.Sprintf(data, "ababab", subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login"
this.Publish(topic, 1, true, []byte(data))
}
/***
* Callback for the sub-device login reply
*/
func (this *Iot) SubscribeSubLoginReply() {
topic_reply := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg := common_types.LoginResponse{}
err := json.Unmarshal(message.Payload(), &msg)
if err != nil {
this.writeLog("error", "SubLogin_reply Json 解析失败"+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLogin_reply Json 登陆失败"+msg.Message)
return
}
this.writeLog("info", "SubLogin_reply "+msg.Data.DeviceName+" 登陆成功"+msg.Message)
/* subscribe to the sub-device topics */
this.SubscribeSubGet(msg.Data.ProductKey, msg.Data.DeviceName)
})
}
/***
* Log a sub-device out
*/
func (this *Iot) PublishSubLoginOut(subProductKey, subDeviceName string) {
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName)
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout"
topic_reply := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout_reply"
this.Publish(topic, 1, false, []byte(data))
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+err.Error())
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+msg.Message)
return
}
})
}
/***
* Compute the signature
*/
func (this *Iot) GetSign(productKey, deviceName, deviceSecret string) (string, string) {
timestamp := strconv.Itoa(int(time.Now().Unix()))
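// concatenate the request fields in a fixed order and sign them with HMAC-SHA1 keyed by the device secret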
str := "clientId" + deviceName + "deviceName" + deviceName + "productKey" + productKey + "timestamp" + timestamp;
key := []byte(deviceSecret)
hmacHandel := hmac.New(sha1.New, key)
hmacHandel.Write([]byte(str))
res := hmacHandel.Sum(nil)
return hex.EncodeToString(res), timestamp
}
/***
* Generate a unique message id
*/
func (this *Iot) getMsgId() string {
return strconv.Itoa(int(time.Now().UnixNano()))
}
func (this *Iot) SetLogOutPut(writer interfaces.ModuleLogger) {
this.logOut = writer
}
func (this *Iot) writeLog(logType, Content string) {
switch logType {
case "warning":
this.logOut.Warning(Content)
break;
case "info":
this.logOut.Info(Content)
break
case "error":
this.logOut.Error(Content)
break;
case "debug":
this.logOut.Debug(Content)
break
default:
this.logOut.Info(Content)
break
}
}
func (this *Iot) Write(data []byte) (int, error) {
if this.Conn.IsConnected() {
this.PublishRaw(data)
}
return 0, nil
}
var appendSubDevicesMutex = sync.Mutex{}
/***
* Add a sub-device to the local list
*/
func (this *Iot) AppendSubDevice(subProductKey, subDeviceName, subDeviceSecret string) (Device) {
subDevice := Device{}
subDevice.ProductKey = subProductKey
subDevice.DeviceName = subDeviceName
subDevice.DeviceSecret = subDeviceSecret
appendSubDevicesMutex.Lock()
this.SubDevices = append(this.SubDevices, subDevice)
appendSubDevicesMutex.Unlock()
return subDevice
}
func (this *Iot) PublishRaw(data []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
this.Publish(topic, 1, false, data)
}
func (this *Iot) PublishLog(log []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
type Log struct {
Timestamp int64 `json:"timestamp"`
Event string `json:"event"`
Data string `json:"data"`
}
logData := Log{}
logData.Timestamp = time.Now().Unix()
logData.Data = string(log)
logData.Event = "log"
data, err := json.Marshal(logData)
if err != nil {
return
}
this.Publish(topic, 1, false, data)
}
func (this *Iot) SyncUpgradeFile(data common_types.UpdateResponse) {
fileContent := `%s
%s
%s`
fileContent = fmt.Sprintf(fileContent, data.Data.Md5, data.Data.Url, data.Data.Version)
var file *os.File;
var err error;
flag := false
for i := 5; i > 0; i-- {
file, err = os.OpenFile("upgrade.plan", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660)
if err != nil {
continue
}
flag = true
time.Sleep(time.Second)
break
}
if !flag {
this.writeLog("error", "SyncUpgradeFile: "+err.Error())
return
}
defer file.Close()
file.Write([]byte(fileContent))
} | DeviceName string | random_line_split |
mqtt.go | package mqttModule
import (
"github.com/eclipse/paho.mqtt.golang"
"fmt"
"time"
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"strconv"
"encoding/json"
"os"
"sync"
interfaces "github.com/gw123/GMQ/core/interfaces"
"github.com/gw123/GMQ/common/common_types"
)
const MaxMsgLen = 100
type MsgIds struct {
Ids []string
head int
tail int
}
func NewMsgIds() *MsgIds {
this := new(MsgIds)
this.head = 0
this.Ids = make([]string, MaxMsgLen)
return this
}
var msgIdsLock = sync.Mutex{}
func (msgIds *MsgIds) Check(msgId string) bool {
msgIdsLock.Lock()
defer msgIdsLock.Unlock()
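// linear scan of the fixed-size ring buffer: a known id means this message was already handled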
for _, id1 := range msgIds.Ids {
if id1 == msgId {
return false
}
}
if msgIds.head >= MaxMsgLen {
msgIds.head = 0
}
msgIds.Ids[msgIds.head] = msgId
msgIds.head++;
return true
}
var msgIds *MsgIds
func init() {
msgIds = NewMsgIds()
}
type Device struct {
DeviceName string
ProductKey string
DeviceSecret string
IsLogin bool
Status uint
LasUsedtime int64
}
type Iot struct {
Host string
DeviceName string
ProductKey string
ClientId string
Username string
Password string
Sign string
Conn mqtt.Client
logOut interfaces.ModuleLogger
App interfaces.App
SubDevices []Device
}
type Params struct {
ProductKey string
DeviceName string
DeviceSecret string
OnConnectHandler mqtt.OnConnectHandler
ConnectionLostHandler mqtt.ConnectionLostHandler
Logger interfaces.ModuleLogger
App interfaces.App
DefaultHandel mqtt.MessageHandler
}
func NewIot(params Params) (iot *Iot) {
iot = new(Iot)
iot.SubDevices = make([]Device, 0)
sign, timestamp := iot.GetSign(params.ProductKey, params.DeviceName, params.DeviceSecret)
iot.Password = sign
iot.ClientId = params.DeviceName + "|securemode=3,signmethod=hmacsha1,timestamp=" + timestamp + "|"
iot.Username = params.DeviceName + "&" + params.ProductKey
iot.DeviceName = params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds.Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId ,message.Payload())
this.App.Pub(event)
}
/***
* Subscribe to the device's get topic
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* Subscribe to a sub-device's get topic
*/
func (this *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* Register a sub-device
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* Add a sub-device to the gateway topology
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
return
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
})
}
/***
step [1, 100]; -1: upgrade failed  -2: download failed  -3: verification failed  -4: flashing failed
desc progress description
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
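// SubscribeUpgrade listens for OTA upgrade notifications; the callback persists the upgrade
// information to disk via SyncUpgradeFile.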
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* Report the device version information
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* Sub-device login. This function keeps running, so invoke it as a goroutine: go iot.PublishSubLogin(...)
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"hmacSha1","cleanSession":"false"}}`
data = fmt.Sprintf(data, "ababab", subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login"
this.Publish(topic, 1, true, []byte(data))
}
/***
* Sub-device login reply callback
*/
func (this *Iot) SubscribeSubLoginReply() {
topic_reply := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg := common_types.LoginResponse{}
err := json.Unmarshal(message.Payload(), &msg)
if err != nil {
this.writeLog("error", "SubLogin_reply Json 解析失败"+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLogin_reply Json 登陆失败"+msg.Message)
return
}
this.writeLog("info", "SubLogin_reply "+msg.Data.DeviceName+" 登陆成功"+msg.Message)
/* subscribe to the sub-device's topics */
this.SubscribeSubGet(msg.Data.ProductKey, msg.Data.DeviceName)
})
}
/***
* Sub-device logout
*/
func (this *Iot) PublishSubLoginOut(subProductKey, subDeviceName string) {
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s",}}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName)
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout"
topic_reply := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout_reply"
this.Publish(topic, 1, false, []byte(data))
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+err.Error())
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+msg.Message)
return
}
})
}
/***
* Compute the signature
*/
func (this *Iot) GetSign(productKey, deviceName, deviceSecret string) (string, string) {
timestamp := strconv.Itoa(int(time.Now().Unix()))
str := "clientId" + deviceName + "deviceName" + deviceName + "productKey" + productKey + "timestamp" + timestamp;
key := []byte(deviceSecret)
hmacHandel := hmac.New(sha1.New, key)
hmacHandel.Write([]byte(str))
res := hmacHandel.Sum(nil)
return hex.EncodeToString(res), timestamp
}
/***
* Get a unique message ID
*/
func (this *Iot) getMsgId() string {
return strconv.Itoa(int(time.Now().UnixNano()))
}
func (this *Iot) SetLogOutPut(writer interfaces.ModuleLogger) {
this.logOut = writer
}
func (this *Iot) writeLog(logType, Content string) {
switch logType {
case "warning":
this.logOut.Warning(Content)
break;
case "info":
this.logOut.Info(Content)
break
case "error":
this.logOut.Error(Content)
break;
case "debug":
this.logOut.Debug(Content)
break
default:
this.logOut.Info(Content)
break
}
}
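// Write implements io.Writer by publishing the data to the update topic while connected.
// Note that it always reports 0 bytes written.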
func (this *Iot) Write(data []byte) (int, error) {
if this.Conn.IsConnected() {
this.PublishRaw(data)
}
return 0, nil
}
var appendSubDevicesMutex = sync.Mutex{}
/***
* Add a sub-device to the local list
*/
func (this *Iot) AppendSubDevice(subProductKey, subDeviceName, subDeviceSecret string) (Device) {
subDevice := Device{}
subDevice.ProductKey = subProductKey
subDevice.DeviceName = subDeviceName
subDevice.DeviceSecret = subDeviceSecret
appendSubDevicesMutex.Lock()
this.SubDevices = append(this.SubDevices, subDevice)
appendSubDevicesMutex.Unlock()
return subDevice
}
func (this *Iot) PublishRaw(data []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
this.Publish(topic, 1, false, data)
}
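// PublishLog wraps raw log bytes in a small JSON envelope (timestamp/event/data) and publishes it
// to the update topic.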
func (this *Iot) PublishLog(log []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
type Log struct {
Timestamp int64 `json:"timestamp"`
Event string `json:"event"`
Data string `json:"data"`
}
logData := Log{}
logData.Timestamp = time.Now().Unix()
logData.Data = string(log)
logData.Event = "log"
data, err := json.Marshal(logData)
if err != nil {
|
this.Publish(topic, 1, false, data)
}
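// SyncUpgradeFile writes the OTA package MD5, URL and version to a local "upgrade.plan" file,
// retrying the open up to 5 times; another component presumably consumes this file to run the upgrade.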
func (this *Iot) SyncUpgradeFile(data common_types.UpdateResponse) {
fileContent := `%s
%s
%s`
fileContent = fmt.Sprintf(fileContent, data.Data.Md5, data.Data.Url, data.Data.Version)
var file *os.File;
var err error;
flag := false
for i := 5; i > 0; i-- {
file, err = os.OpenFile("upgrade.plan", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660)
if err != nil {
continue
}
flag = true
time.Sleep(time.Second)
break
}
if !flag {
this.writeLog("error", "SyncUpgradeFile: "+err.Error())
return
}
defer file.Close()
file.Write([]byte(fileContent))
}
| return
} | identifier_name |
mqtt.go | package mqttModule
import (
"github.com/eclipse/paho.mqtt.golang"
"fmt"
"time"
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"strconv"
"encoding/json"
"os"
"sync"
interfaces "github.com/gw123/GMQ/core/interfaces"
"github.com/gw123/GMQ/common/common_types"
)
const MaxMsgLen = 100
type MsgIds struct {
Ids []string
head int
tail int
}
func NewMsgIds() *MsgIds {
this := new(MsgIds)
this.head = 0
this.Ids = make([]string, MaxMsgLen)
return this
}
var msgIdsLock = sync.Mutex{}
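// Check reports whether msgId has not been seen recently. Seen ids are stored in a fixed-size
// ring buffer of MaxMsgLen entries so duplicate MQTT messages can be dropped.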
func (msgIds *MsgIds) Check(msgId string) bool {
msgIdsLock.Lock()
defer msgIdsLock.Unlock()
for _, id1 := range msgIds.Ids {
if id1 == msgId {
return false
}
}
if msgIds.head >= MaxMsgLen {
msgIds.head = 0
}
msgIds.Ids[msgIds.head] = msgId
msgIds.head++;
return true
}
var msgIds *MsgIds
func init() {
msgIds = NewMsgIds()
}
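// Device describes a sub-device managed by this Iot client.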
type Device struct {
DeviceName string
ProductKey string
DeviceSecret string
IsLogin bool
Status uint
LasUsedtime int64
}
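// Iot holds the MQTT connection to Aliyun IoT together with the device credentials and the
// sub-devices it manages.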
type Iot struct {
Host string
DeviceName string
ProductKey string
ClientId string
Username string
Password string
Sign string
Conn mqtt.Client
logOut interfaces.ModuleLogger
App interfaces.App
SubDevices []Device
}
type Params struct {
ProductKey string
DeviceName string
DeviceSecret string
OnConnectHandler mqtt.OnConnectHandler
ConnectionLostHandler mqtt.ConnectionLostHandler
Logger interfaces.ModuleLogger
App interfaces.App
DefaultHandel mqtt.MessageHandler
}
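// NewIot builds an Iot client: it derives the MQTT password from an HMAC-SHA1 signature of the
// device credentials and configures the underlying paho client (keep-alive, auto-reconnect, handlers).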
func NewIot(params Params) (iot *Iot) {
iot = new(Iot)
iot.SubDevices = make([]Device, 0)
sign, timestamp := iot.GetSign(params.ProductKey, params.DeviceName, params.DeviceSecret)
iot.Password = sign
iot.ClientId = params.DeviceName + "|securemode=3,signmethod=hmacsha1,timestamp=" + timestamp + "|"
iot.Username = params.DeviceName + "&" + params.ProductKey
iot.DeviceName = params.DeviceName
iot.ProductKey = params.ProductKey
iot.Host = params.ProductKey + ".iot-as-mqtt.cn-shanghai.aliyuncs.com:1883"
opts := mqtt.NewClientOptions().AddBroker(iot.Host).SetClientID(iot.ClientId).SetUsername(iot.Username).SetPassword(iot.Password)
opts.SetPingTimeout(5 * time.Second)
opts.SetKeepAlive(30 * time.Second)
opts.SetCleanSession(false)
opts.SetAutoReconnect(true)
opts.SetConnectionLostHandler(params.ConnectionLostHandler)
opts.SetDefaultPublishHandler(params.DefaultHandel)
opts.SetOnConnectHandler(params.OnConnectHandler)
opts.SetMaxReconnectInterval(2 * time.Minute)
iot.App = params.App
iot.logOut = params.Logger
iot.Conn = mqtt.NewClient(opts)
return
}
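// Connect opens the MQTT connection and returns the connection error, if any.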
func (this *Iot) Connect() (err error) {
c := this.Conn
if token := c.Connect(); token.Wait() && token.Error() != nil {
return token.Error()
}
return nil
}
func (this *Iot) Close() {
if this.Conn.IsConnected() {
this.Conn.Disconnect(256)
}
}
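// Publish forwards to the underlying client; it does not wait on the returned token, so publish
// errors are silently dropped here.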
func (this *Iot) Publish(topic string, qos byte, retained bool, payload interface{}) error {
this.Conn.Publish(topic, qos, retained, payload)
return nil
}
func (this *Iot) Subscribe(topic string, qos byte, callback mqtt.MessageHandler) error {
if token := this.Conn.Subscribe(topic, qos, func(client mqtt.Client, message mqtt.Message) {
callback(client, message)
}); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
func (this *Iot) SubscribeAndCheck(topic string, qos byte) error {
if token := this.Conn.Subscribe(topic, qos, this.SubscribeGetCallback); token.Wait() && token.Error() != nil {
this.writeLog("error", "Token publish: "+token.Error().Error())
return token.Error()
}
return nil
}
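// SubscribeGetCallback handles messages on the device get topic: payloads that fail to parse or whose
// MsgId was already seen are rejected, otherwise the payload is republished as an internal event.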
func (this *Iot) SubscribeGetCallback(client mqtt.Client, message mqtt.Message) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/get"
msg := &AliMsg{}
err := json.Unmarshal(message.Payload(), msg)
if err != nil {
this.writeLog("error", "Topic "+topic+" 消息解密失败 "+err.Error()+" Payload: "+string(message.Payload()))
return
}
if !msgIds.Check(msg.MsgId) {
this.writeLog("warning", "msgId "+msg.MsgId+" Topic"+topic+" 重复消息")
return
}
event := common_types.NewEvent(msg.MsgId, message.Payload())
this.App.Pub(event)
}
/***
* Subscribe to the get topic
*/
func (this *Iot) SubscribeGet() {
topic := "/" + this.Produ | *Iot) SubscribeSubGet(subProductKey, subDeviceName string) {
topic := "/" + subProductKey + "/" + subDeviceName + "/get"
this.SubscribeAndCheck(topic, 0)
}
/***
* Sub-device registration
*/
func (this *Iot) PublishSubRegister(subProductKey, subDeviceName string) {
data := "{'id': '%s', 'version':'1.0','params':[{'deviceName':'%s','productKey':'%s'}],'method':'thing.sub.register'}"
data = fmt.Sprintf(data, this.getMsgId(), subDeviceName, subProductKey)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register"
this.Publish(topic, 0, false, []byte(data))
}
func (this *Iot) SubscribeSubRegisterReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/sub/register_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubRegister_reply json内容解析失败 "+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubRegister_reply 子设备注册失败 "+msg.Message)
return
}
v, ok := msg.Data.([]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败 "+string(message.Payload()))
return
}
for _, deviceData := range v {
deviceInfo, ok := deviceData.(map[string]interface{})
if !ok {
this.writeLog("error", "SubRegister_reply json内容解析失败->data解析失败->不能转为map"+string(message.Payload()))
continue
}
deviceSecret, _ := deviceInfo["deviceSecret"].(string)
productKey, _ := deviceInfo["productKey"].(string)
deviceName, _ := deviceInfo["deviceName"].(string)
this.writeLog("info", "SubRegister_reply 注册成功: "+deviceName)
go this.SubDeviceLogin(productKey, deviceName, deviceSecret)
}
})
}
func (this *Iot) SubDeviceLogin(productKey, deviceName, deviceSecret string) {
this.AppendSubDevice(productKey, deviceName, deviceSecret)
this.PublishSubAdd(productKey, deviceName, deviceSecret)
time.Sleep(time.Second * 4)
this.PublishSubLogin(productKey, deviceName, deviceSecret)
}
/***
* Add the sub-device topology (thing.topo.add)
*/
func (this *Iot) PublishSubAdd(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","version":"1.0","params":[{"productKey" : "%s","deviceName" : "%s","clientId":"%s","sign":"%s","signmethod":"hmacSha1","timestamp":"%s"}],"method":"thing.topo.add"}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add"
this.SubscribeSubAddReply()
this.Publish(topic, 0, true, []byte(data))
}
func (this *Iot) SubscribeSubAddReply() {
topic_reply := "/sys/" + this.ProductKey + "/" + this.DeviceName + "/thing/topo/add_reply"
this.Subscribe(topic_reply, 0, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "PublishSubAdd "+"JSON解析失败")
return
}
if msg.Code != 200 {
this.writeLog("error", "PublishSubAdd "+msg.Message)
return
}
this.writeLog("info", "PublishSubAdd 子设备拓扑添加成功")
})
}
/***
step [1, 100]; -1: upgrade failed  -2: download failed  -3: verification failed  -4: flashing failed
desc progress description
*/
func (this *Iot) PublishProgress(step int8, desc string) {
topic := "/ota/device/progress/" + this.ProductKey + "/" + this.DeviceName
data := `{ "id": "%s", "params": {"step":"%d", "desc":" %s"}}`
data = fmt.Sprintf(data, this.getMsgId(), step, desc)
this.Publish(topic, 1, false, []byte(data))
}
func (this *Iot) SubscribeUpgrade() {
topic := "/ota/device/upgrade/" + this.ProductKey + "/" + this.DeviceName
this.Subscribe(topic, 1, this.SubscribeUpgradeCallback)
}
func (this *Iot) SubscribeUpgradeCallback(client mqtt.Client, message mqtt.Message) {
//fmt.Println("SubscribeUpgradeCallback", message.Topic(), string(message.Payload()))
update := common_types.UpdateResponse{}
err := json.Unmarshal(message.Payload(), &update)
if err != nil {
this.writeLog("error", "SubscribeUpgrade"+"Json fail "+err.Error())
return
}
if update.Message != "success" {
this.writeLog("error", "SubscribeUpgrade "+update.Message)
return
}
this.SyncUpgradeFile(update)
}
/***
* Report the device version information
*/
func (this *Iot) PublishInform(version string) {
data := `{"id": "%s","params": {"version": "%s"}}`
data = fmt.Sprintf(data, this.getMsgId(), version)
topic := "/ota/device/inform/" + this.ProductKey + "/" + this.DeviceName
this.Publish(topic, 0, false, []byte(data))
}
/***
* Sub-device login. This function keeps running, so invoke it as a goroutine: go iot.PublishSubLogin(...)
*/
func (this *Iot) PublishSubLogin(subProductKey, subDeviceName, subDeviceSecret string) {
sign, timestamp := this.GetSign(subProductKey, subDeviceName, subDeviceSecret)
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s","clientId":"%s","sign":"%s","timestamp":"%s","signMethod":"hmacSha1","cleanSession":"false"}}`
data = fmt.Sprintf(data, "ababab", subProductKey, subDeviceName, subDeviceName, sign, timestamp)
topic := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login"
this.Publish(topic, 1, true, []byte(data))
}
/***
* Sub-device login reply callback
*/
func (this *Iot) SubscribeSubLoginReply() {
topic_reply := "/ext/session/" + this.ProductKey + "/" + this.DeviceName + "/combine/login_reply"
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg := common_types.LoginResponse{}
err := json.Unmarshal(message.Payload(), &msg)
if err != nil {
this.writeLog("error", "SubLogin_reply Json 解析失败"+string(message.Payload()))
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLogin_reply Json 登陆失败"+msg.Message)
return
}
this.writeLog("info", "SubLogin_reply "+msg.Data.DeviceName+" 登陆成功"+msg.Message)
/* subscribe to the sub-device's topics */
this.SubscribeSubGet(msg.Data.ProductKey, msg.Data.DeviceName)
})
}
/***
* Sub-device logout
*/
func (this *Iot) PublishSubLoginOut(subProductKey, subDeviceName string) {
data := `{"id":"%s","params":{"productKey":"%s","deviceName":"%s",}}`
data = fmt.Sprintf(data, this.getMsgId(), subProductKey, subDeviceName)
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout"
topic_reply := "/" + this.ProductKey + "/" + this.DeviceName + "/combine/logout_reply"
this.Publish(topic, 1, false, []byte(data))
this.Subscribe(topic_reply, 1, func(client mqtt.Client, message mqtt.Message) {
msg, err := common_types.ParseAliMsg(message.Payload())
if err != nil {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+err.Error())
return
}
if msg.Code != 200 {
this.writeLog("error", "SubLoginOut :"+subDeviceName+" "+msg.Message)
return
}
})
}
/***
* Compute the signature
*/
func (this *Iot) GetSign(productKey, deviceName, deviceSecret string) (string, string) {
timestamp := strconv.Itoa(int(time.Now().Unix()))
str := "clientId" + deviceName + "deviceName" + deviceName + "productKey" + productKey + "timestamp" + timestamp;
key := []byte(deviceSecret)
hmacHandel := hmac.New(sha1.New, key)
hmacHandel.Write([]byte(str))
res := hmacHandel.Sum(nil)
return hex.EncodeToString(res), timestamp
}
/***
* Get a unique message ID
*/
func (this *Iot) getMsgId() string {
return strconv.Itoa(int(time.Now().UnixNano()))
}
func (this *Iot) SetLogOutPut(writer interfaces.ModuleLogger) {
this.logOut = writer
}
func (this *Iot) writeLog(logType, Content string) {
switch logType {
case "warning":
this.logOut.Warning(Content)
break;
case "info":
this.logOut.Info(Content)
break
case "error":
this.logOut.Error(Content)
break;
case "debug":
this.logOut.Debug(Content)
break
default:
this.logOut.Info(Content)
break
}
}
func (this *Iot) Write(data []byte) (int, error) {
if this.Conn.IsConnected() {
this.PublishRaw(data)
}
return 0, nil
}
var appendSubDevicesMutex = sync.Mutex{}
/***
* Add a sub-device to the local list
*/
func (this *Iot) AppendSubDevice(subProductKey, subDeviceName, subDeviceSecret string) (Device) {
subDevice := Device{}
subDevice.ProductKey = subProductKey
subDevice.DeviceName = subDeviceName
subDevice.DeviceSecret = subDeviceSecret
appendSubDevicesMutex.Lock()
this.SubDevices = append(this.SubDevices, subDevice)
appendSubDevicesMutex.Unlock()
return subDevice
}
func (this *Iot) PublishRaw(data []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
this.Publish(topic, 1, false, data)
}
func (this *Iot) PublishLog(log []byte) {
topic := "/" + this.ProductKey + "/" + this.DeviceName + "/update"
type Log struct {
Timestamp int64 `json:"timestamp"`
Event string `json:"event"`
Data string `json:"data"`
}
logData := Log{}
logData.Timestamp = time.Now().Unix()
logData.Data = string(log)
logData.Event = "log"
data, err := json.Marshal(logData)
if err != nil {
return
}
this.Publish(topic, 1, false, data)
}
func (this *Iot) SyncUpgradeFile(data common_types.UpdateResponse) {
fileContent := `%s
%s
%s`
fileContent = fmt.Sprintf(fileContent, data.Data.Md5, data.Data.Url, data.Data.Version)
var file *os.File;
var err error;
flag := false
for i := 5; i > 0; i-- {
file, err = os.OpenFile("upgrade.plan", os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660)
if err != nil {
continue
}
flag = true
time.Sleep(time.Second)
break
}
if !flag {
this.writeLog("error", "SyncUpgradeFile: "+err.Error())
return
}
defer file.Close()
file.Write([]byte(fileContent))
}
| ctKey + "/" + this.DeviceName + "/get"
this.SubscribeAndCheck(topic, 1)
}
/***
* Subscribe to a sub-device's get topic
*/
func (this | identifier_body |
MapTilingScheme.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Bentley Systems, Incorporated. All rights reserved.
* See LICENSE.md in the project root for license terms and full copyright notice.
*--------------------------------------------------------------------------------------------*/
/** @packageDocumentation
* @module Tile
*/
import { assert } from "@bentley/bentleyjs-core";
import {
Cartographic,
EcefLocation,
} from "@bentley/imodeljs-common";
import {
Angle,
Point2d,
Point3d,
Range2d,
Transform,
Vector3d,
} from "@bentley/geometry-core";
import { IModelConnection } from "../IModelConnection";
/** @internal */
export class QuadId {
public level: number;
public column: number;
public row: number;
public get isValid() { return this.level >= 0; }
private static _scratchCartographic = new Cartographic();
public static createFromContentId(stringId: string) {
const idParts = stringId.split("_");
if (3 !== idParts.length) {
assert(false, "Invalid quad tree ID");
return new QuadId(-1, -1, -1);
}
return new QuadId(parseInt(idParts[0], 10), parseInt(idParts[1], 10), parseInt(idParts[2], 10));
}
public get contentId(): string { return this.level + "_" + this.column + "_" + this.row; }
public constructor(level: number, column: number, row: number) {
this.level = level;
this.column = column;
this.row = row;
}
// Not used in display - used only to tell whether this tile overlaps the range provided by a tile provider for attribution.
public getLatLongRange(mapTilingScheme: MapTilingScheme): Range2d {
const range = Range2d.createNull();
mapTilingScheme.tileXYToCartographic(this.column, this.row, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
mapTilingScheme.tileXYToCartographic(this.column + 1, this.row + 1, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
return range;
}
}
/** @internal */
export class MapTileRectangle extends Range2d {
public constructor(west = 0, south = 0, east = 0, north = 0) {
super(west, south, east, north);
}
public static create(west = 0, south = 0, east = 0, north = 0, result?: MapTileRectangle): MapTileRectangle {
if (!result)
return new MapTileRectangle(west, south, east, north);
result.init(west, south, east, north);
return result;
}
public get west() { return this.low.x; }
public set west(x: number) { this.low.x = x; }
public get south() { return this.low.y; }
public set south(y: number) { this.low.y = y; }
public get east() { return this.high.x; }
public set east(x: number) { this.high.x = x; }
public get north() { return this.high.y; }
public set north(y: number) { this.high.y = y; }
public init(west = 0, south = 0, east = 0, north = 0) {
this.west = west;
this.south = south;
this.east = east;
this.north = north;
}
public containsCartographic(carto: Cartographic) { return this.containsXY(carto.longitude, carto.latitude); }
public getCenter(result?: Cartographic): Cartographic {
return Cartographic.fromRadians((this.west + this.east) / 2, (this.north + this.south) / 2, 0, result);
}
}
/** @internal */
export abstract class MapTilingScheme {
private _scratchFraction = Point2d.createZero();
/**
* @param longitude in radians (-pi to pi)
*/
public longitudeToXFraction(longitude: number) {
return longitude / Angle.pi2Radians + .5;
}
/**
* Return longitude in radians (-pi to pi from fraction).
* @param xFraction
*/
public xFractionToLongitude(xFraction: number) {
return Angle.pi2Radians * (xFraction - .5);
}
public abstract yFractionToLatitude(yFraction: number): number;
public abstract latitudeToYFraction(latitude: number): number;
protected constructor(public readonly numberOfLevelZeroTilesX: number, public readonly numberOfLevelZeroTilesY: number, private _rowZeroAtTop: boolean) { }
/**
* Gets the total number of tiles in the X direction at a specified level-of-detail.
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the X direction at the given level.
*/
public getNumberOfXTilesAtLevel(level: number) {
return 0 === level ? 1 : this.numberOfLevelZeroTilesX << (level - 1);
}
/**
* Gets the total number of tiles in the Y direction at a specified level-of-detail.
*
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the Y direction at the given level.
*/
public getNumberOfYTilesAtLevel(level: number): number {
return (0 === level) ? 1 : this.numberOfLevelZeroTilesY << (level - 1);
}
|
public tileYToFraction(y: number, level: number): number {
let yFraction = y / this.getNumberOfYTilesAtLevel(level);
if (this._rowZeroAtTop)
yFraction = 1.0 - yFraction;
return yFraction;
}
public tileXToLongitude(x: number, level: number) {
return this.xFractionToLongitude(this.tileXToFraction(x, level));
}
public tileYToLatitude(y: number, level: number) {
return this.yFractionToLatitude(this.tileYToFraction(y, level));
}
/**
* Gets the fraction of the normalized (0-1) coordinates with at left, bottom.
*
* @param x column
* @param y row
* @param level depth
* @param result result (0-1 from left, bottom)
*/
public tileXYToFraction(x: number, y: number, level: number, result?: Point2d): Point2d {
if (undefined === result)
result = Point2d.createZero();
result.x = this.tileXToFraction(x, level);
result.y = this.tileYToFraction(y, level);
return result;
}
/**
*
* @param x column
* @param y row
* @param level depth
* @param result result longitude, latitude.
* @param height height (optional)
*/
public tileXYToCartographic(x: number, y: number, level: number, result: Cartographic, height?: number): Cartographic {
this.tileXYToFraction(x, y, level, this._scratchFraction);
return this.fractionToCartographic(this._scratchFraction.x, this._scratchFraction.y, result, height);
}
public tileXYToRectangle(x: number, y: number, level: number, result?: MapTileRectangle) {
return MapTileRectangle.create(this.tileXToLongitude(x, level), this.tileYToLatitude(y, level), this.tileXToLongitude(x + 1, level), this.tileYToLatitude(y + 1, level), result);
}
/**
*
* @param xFraction
* @param yFraction
* @param result
* @param height
*/
public fractionToCartographic(xFraction: number, yFraction: number, result: Cartographic, height?: number): Cartographic {
result.longitude = this.xFractionToLongitude(xFraction);
result.latitude = this.yFractionToLatitude(yFraction);
result.height = undefined === height ? 0.0 : height;
return result;
}
public cartographicToFraction(latitudeRadians: number, longitudeRadians: number, result: Point2d): Point2d {
result.x = this.longitudeToXFraction(longitudeRadians);
result.y = this.latitudeToYFraction(latitudeRadians);
return result;
}
// gets the longitude and latitude into a point with coordinates between 0 and 1
public ecefToPixelFraction(point: Point3d): Point3d {
const cartoGraphic = Cartographic.fromEcef(point)!;
return Point3d.create(this.longitudeToXFraction(cartoGraphic.longitude), this.latitudeToYFraction(cartoGraphic.latitude), 0.0);
}
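/** Computes the transform from normalized mercator fractions to iModel (db) coordinates by projecting
 * three points near the project center through the iModel's ECEF location, honoring the ground bias.
 */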
public computeMercatorFractionToDb(iModel: IModelConnection, groundBias: number): Transform {
const ecefLocation: EcefLocation = iModel.ecefLocation!;
const dbToEcef = ecefLocation.getTransform();
const projectCenter = Point3d.create(iModel.projectExtents.center.x, iModel.projectExtents.center.y, groundBias);
const projectEast = Point3d.create(projectCenter.x + 1.0, projectCenter.y, groundBias);
const projectNorth = Point3d.create(projectCenter.x, projectCenter.y + 1.0, groundBias);
const mercatorOrigin = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectCenter));
const mercatorX = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectEast));
const mercatorY = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectNorth));
const deltaX = Vector3d.createStartEnd(mercatorOrigin, mercatorX);
const deltaY = Vector3d.createStartEnd(mercatorOrigin, mercatorY);
const dbToMercator = Transform.createOriginAndMatrixColumns(mercatorOrigin, deltaX, deltaY, Vector3d.create(0.0, 0.0, 1.0)).multiplyTransformTransform(Transform.createTranslationXYZ(-projectCenter.x, -projectCenter.y, -groundBias));
return dbToMercator.inverse() as Transform;
}
}
/** @internal */
export class GeographicTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 1, rowZeroAtTop: boolean = false) {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
}
public yFractionToLatitude(yFraction: number): number {
return Math.PI * (yFraction - .5);
}
public latitudeToYFraction(latitude: number): number {
return .5 + latitude / Math.PI;
}
}
/** @internal */
export class WebMercatorTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 2, rowZeroAtTop: boolean = false) {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
}
public yFractionToLatitude(yFraction: number): number {
const mercatorAngle = Angle.pi2Radians * (yFraction - .5);
return Angle.piOver2Radians - (2.0 * Math.atan(Math.exp(mercatorAngle)));
}
public latitudeToYFraction(latitude: number): number {
const sinLatitude = Math.sin(latitude);
return (0.5 - Math.log((1.0 + sinLatitude) / (1.0 - sinLatitude)) / (4.0 * Angle.piRadians)); // https://msdn.microsoft.com/en-us/library/bb259689.aspx
}
} | public tileXToFraction(x: number, level: number): number {
return x / this.getNumberOfXTilesAtLevel(level);
}
| random_line_split |
MapTilingScheme.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Bentley Systems, Incorporated. All rights reserved.
* See LICENSE.md in the project root for license terms and full copyright notice.
*--------------------------------------------------------------------------------------------*/
/** @packageDocumentation
* @module Tile
*/
import { assert } from "@bentley/bentleyjs-core";
import {
Cartographic,
EcefLocation,
} from "@bentley/imodeljs-common";
import {
Angle,
Point2d,
Point3d,
Range2d,
Transform,
Vector3d,
} from "@bentley/geometry-core";
import { IModelConnection } from "../IModelConnection";
/** @internal */
export class QuadId {
public level: number;
public column: number;
public row: number;
public get | () { return this.level >= 0; }
private static _scratchCartographic = new Cartographic();
public static createFromContentId(stringId: string) {
const idParts = stringId.split("_");
if (3 !== idParts.length) {
assert(false, "Invalid quad tree ID");
return new QuadId(-1, -1, -1);
}
return new QuadId(parseInt(idParts[0], 10), parseInt(idParts[1], 10), parseInt(idParts[2], 10));
}
public get contentId(): string { return this.level + "_" + this.column + "_" + this.row; }
public constructor(level: number, column: number, row: number) {
this.level = level;
this.column = column;
this.row = row;
}
// Not used in display - used only to tell whether this tile overlaps the range provided by a tile provider for attribution.
public getLatLongRange(mapTilingScheme: MapTilingScheme): Range2d {
const range = Range2d.createNull();
mapTilingScheme.tileXYToCartographic(this.column, this.row, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
mapTilingScheme.tileXYToCartographic(this.column + 1, this.row + 1, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
return range;
}
}
/** @internal */
export class MapTileRectangle extends Range2d {
public constructor(west = 0, south = 0, east = 0, north = 0) {
super(west, south, east, north);
}
public static create(west = 0, south = 0, east = 0, north = 0, result?: MapTileRectangle): MapTileRectangle {
if (!result)
return new MapTileRectangle(west, south, east, north);
result.init(west, south, east, north);
return result;
}
public get west() { return this.low.x; }
public set west(x: number) { this.low.x = x; }
public get south() { return this.low.y; }
public set south(y: number) { this.low.y = y; }
public get east() { return this.high.x; }
public set east(x: number) { this.high.x = x; }
public get north() { return this.high.y; }
public set north(y: number) { this.high.y = y; }
public init(west = 0, south = 0, east = 0, north = 0) {
this.west = west;
this.south = south;
this.east = east;
this.north = north;
}
public containsCartographic(carto: Cartographic) { return this.containsXY(carto.longitude, carto.latitude); }
public getCenter(result?: Cartographic): Cartographic {
return Cartographic.fromRadians((this.west + this.east) / 2, (this.north + this.south) / 2, 0, result);
}
}
/** @internal */
export abstract class MapTilingScheme {
private _scratchFraction = Point2d.createZero();
/**
* @param longitude in radians (-pi to pi)
*/
public longitudeToXFraction(longitude: number) {
return longitude / Angle.pi2Radians + .5;
}
/**
* Return longitude in radians (-pi to pi from fraction).
* @param xFraction
*/
public xFractionToLongitude(xFraction: number) {
return Angle.pi2Radians * (xFraction - .5);
}
public abstract yFractionToLatitude(yFraction: number): number;
public abstract latitudeToYFraction(latitude: number): number;
protected constructor(public readonly numberOfLevelZeroTilesX: number, public readonly numberOfLevelZeroTilesY: number, private _rowZeroAtTop: boolean) { }
/**
* Gets the total number of tiles in the X direction at a specified level-of-detail.
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the X direction at the given level.
*/
public getNumberOfXTilesAtLevel(level: number) {
return 0 === level ? 1 : this.numberOfLevelZeroTilesX << (level - 1);
}
/**
* Gets the total number of tiles in the Y direction at a specified level-of-detail.
*
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the Y direction at the given level.
*/
public getNumberOfYTilesAtLevel(level: number): number {
return (0 === level) ? 1 : this.numberOfLevelZeroTilesY << (level - 1);
}
public tileXToFraction(x: number, level: number): number {
return x / this.getNumberOfXTilesAtLevel(level);
}
public tileYToFraction(y: number, level: number): number {
let yFraction = y / this.getNumberOfYTilesAtLevel(level);
if (this._rowZeroAtTop)
yFraction = 1.0 - yFraction;
return yFraction;
}
public tileXToLongitude(x: number, level: number) {
return this.xFractionToLongitude(this.tileXToFraction(x, level));
}
public tileYToLatitude(y: number, level: number) {
return this.yFractionToLatitude(this.tileYToFraction(y, level));
}
/**
* Gets the fraction of the normalized (0-1) coordinates with at left, bottom.
*
* @param x column
* @param y row
* @param level depth
* @param result result (0-1 from left, bottom)
*/
public tileXYToFraction(x: number, y: number, level: number, result?: Point2d): Point2d {
if (undefined === result)
result = Point2d.createZero();
result.x = this.tileXToFraction(x, level);
result.y = this.tileYToFraction(y, level);
return result;
}
/**
*
* @param x column
* @param y row
* @param level depth
* @param result result longitude, latitude.
* @param height height (optional)
*/
public tileXYToCartographic(x: number, y: number, level: number, result: Cartographic, height?: number): Cartographic {
this.tileXYToFraction(x, y, level, this._scratchFraction);
return this.fractionToCartographic(this._scratchFraction.x, this._scratchFraction.y, result, height);
}
public tileXYToRectangle(x: number, y: number, level: number, result?: MapTileRectangle) {
return MapTileRectangle.create(this.tileXToLongitude(x, level), this.tileYToLatitude(y, level), this.tileXToLongitude(x + 1, level), this.tileYToLatitude(y + 1, level), result);
}
/**
*
* @param xFraction
* @param yFraction
* @param result
* @param height
*/
public fractionToCartographic(xFraction: number, yFraction: number, result: Cartographic, height?: number): Cartographic {
result.longitude = this.xFractionToLongitude(xFraction);
result.latitude = this.yFractionToLatitude(yFraction);
result.height = undefined === height ? 0.0 : height;
return result;
}
public cartographicToFraction(latitudeRadians: number, longitudeRadians: number, result: Point2d): Point2d {
result.x = this.longitudeToXFraction(longitudeRadians);
result.y = this.latitudeToYFraction(latitudeRadians);
return result;
}
// gets the longitude and latitude into a point with coordinates between 0 and 1
public ecefToPixelFraction(point: Point3d): Point3d {
const cartoGraphic = Cartographic.fromEcef(point)!;
return Point3d.create(this.longitudeToXFraction(cartoGraphic.longitude), this.latitudeToYFraction(cartoGraphic.latitude), 0.0);
}
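/** Computes the transform from normalized mercator fractions to iModel (db) coordinates by projecting
 * three points near the project center through the iModel's ECEF location, honoring the ground bias.
 */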
public computeMercatorFractionToDb(iModel: IModelConnection, groundBias: number): Transform {
const ecefLocation: EcefLocation = iModel.ecefLocation!;
const dbToEcef = ecefLocation.getTransform();
const projectCenter = Point3d.create(iModel.projectExtents.center.x, iModel.projectExtents.center.y, groundBias);
const projectEast = Point3d.create(projectCenter.x + 1.0, projectCenter.y, groundBias);
const projectNorth = Point3d.create(projectCenter.x, projectCenter.y + 1.0, groundBias);
const mercatorOrigin = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectCenter));
const mercatorX = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectEast));
const mercatorY = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectNorth));
const deltaX = Vector3d.createStartEnd(mercatorOrigin, mercatorX);
const deltaY = Vector3d.createStartEnd(mercatorOrigin, mercatorY);
const dbToMercator = Transform.createOriginAndMatrixColumns(mercatorOrigin, deltaX, deltaY, Vector3d.create(0.0, 0.0, 1.0)).multiplyTransformTransform(Transform.createTranslationXYZ(-projectCenter.x, -projectCenter.y, -groundBias));
return dbToMercator.inverse() as Transform;
}
}
/** @internal */
export class GeographicTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 1, rowZeroAtTop: boolean = false) {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
}
public yFractionToLatitude(yFraction: number): number {
return Math.PI * (yFraction - .5);
}
public latitudeToYFraction(latitude: number): number {
return .5 + latitude / Math.PI;
}
}
/** @internal */
export class WebMercatorTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 2, rowZeroAtTop: boolean = false) {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
}
public yFractionToLatitude(yFraction: number): number {
const mercatorAngle = Angle.pi2Radians * (yFraction - .5);
return Angle.piOver2Radians - (2.0 * Math.atan(Math.exp(mercatorAngle)));
}
public latitudeToYFraction(latitude: number): number {
const sinLatitude = Math.sin(latitude);
return (0.5 - Math.log((1.0 + sinLatitude) / (1.0 - sinLatitude)) / (4.0 * Angle.piRadians)); // https://msdn.microsoft.com/en-us/library/bb259689.aspx
}
}
| isValid | identifier_name |
MapTilingScheme.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Bentley Systems, Incorporated. All rights reserved.
* See LICENSE.md in the project root for license terms and full copyright notice.
*--------------------------------------------------------------------------------------------*/
/** @packageDocumentation
* @module Tile
*/
import { assert } from "@bentley/bentleyjs-core";
import {
Cartographic,
EcefLocation,
} from "@bentley/imodeljs-common";
import {
Angle,
Point2d,
Point3d,
Range2d,
Transform,
Vector3d,
} from "@bentley/geometry-core";
import { IModelConnection } from "../IModelConnection";
/** @internal */
export class QuadId {
public level: number;
public column: number;
public row: number;
public get isValid() { return this.level >= 0; }
private static _scratchCartographic = new Cartographic();
public static createFromContentId(stringId: string) {
const idParts = stringId.split("_");
if (3 !== idParts.length) {
assert(false, "Invalid quad tree ID");
return new QuadId(-1, -1, -1);
}
return new QuadId(parseInt(idParts[0], 10), parseInt(idParts[1], 10), parseInt(idParts[2], 10));
}
public get contentId(): string { return this.level + "_" + this.column + "_" + this.row; }
public constructor(level: number, column: number, row: number) {
this.level = level;
this.column = column;
this.row = row;
}
// Not used in display - used only to tell whether this tile overlaps the range provided by a tile provider for attribution.
public getLatLongRange(mapTilingScheme: MapTilingScheme): Range2d {
const range = Range2d.createNull();
mapTilingScheme.tileXYToCartographic(this.column, this.row, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
mapTilingScheme.tileXYToCartographic(this.column + 1, this.row + 1, this.level, QuadId._scratchCartographic);
range.extendXY(QuadId._scratchCartographic.longitude * Angle.degreesPerRadian, QuadId._scratchCartographic.latitude * Angle.degreesPerRadian);
return range;
}
}
/** @internal */
export class MapTileRectangle extends Range2d {
public constructor(west = 0, south = 0, east = 0, north = 0) {
super(west, south, east, north);
}
public static create(west = 0, south = 0, east = 0, north = 0, result?: MapTileRectangle): MapTileRectangle {
if (!result)
return new MapTileRectangle(west, south, east, north);
result.init(west, south, east, north);
return result;
}
public get west() { return this.low.x; }
public set west(x: number) { this.low.x = x; }
public get south() { return this.low.y; }
public set south(y: number) { this.low.y = y; }
public get east() { return this.high.x; }
public set east(x: number) { this.high.x = x; }
public get north() { return this.high.y; }
public set north(y: number) { this.high.y = y; }
public init(west = 0, south = 0, east = 0, north = 0) {
this.west = west;
this.south = south;
this.east = east;
this.north = north;
}
public containsCartographic(carto: Cartographic) { return this.containsXY(carto.longitude, carto.latitude); }
public getCenter(result?: Cartographic): Cartographic {
return Cartographic.fromRadians((this.west + this.east) / 2, (this.north + this.south) / 2, 0, result);
}
}
/** @internal */
export abstract class MapTilingScheme {
private _scratchFraction = Point2d.createZero();
/**
* @param longitude in radians (-pi to pi)
*/
public longitudeToXFraction(longitude: number) {
return longitude / Angle.pi2Radians + .5;
}
/**
* Return longitude in radians (-pi to pi from fraction).
* @param xFraction
*/
public xFractionToLongitude(xFraction: number) {
return Angle.pi2Radians * (xFraction - .5);
}
public abstract yFractionToLatitude(yFraction: number): number;
public abstract latitudeToYFraction(latitude: number): number;
protected constructor(public readonly numberOfLevelZeroTilesX: number, public readonly numberOfLevelZeroTilesY: number, private _rowZeroAtTop: boolean) { }
/**
* Gets the total number of tiles in the X direction at a specified level-of-detail.
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the X direction at the given level.
*/
public getNumberOfXTilesAtLevel(level: number) {
return 0 === level ? 1 : this.numberOfLevelZeroTilesX << (level - 1);
}
/**
* Gets the total number of tiles in the Y direction at a specified level-of-detail.
*
*
* @param {Number} level The level-of-detail. Level 0 is the root tile.
* @returns {Number} The number of tiles in the Y direction at the given level.
*/
public getNumberOfYTilesAtLevel(level: number): number {
return (0 === level) ? 1 : this.numberOfLevelZeroTilesY << (level - 1);
}
public tileXToFraction(x: number, level: number): number {
return x / this.getNumberOfXTilesAtLevel(level);
}
public tileYToFraction(y: number, level: number): number {
let yFraction = y / this.getNumberOfYTilesAtLevel(level);
if (this._rowZeroAtTop)
yFraction = 1.0 - yFraction;
return yFraction;
}
public tileXToLongitude(x: number, level: number) {
return this.xFractionToLongitude(this.tileXToFraction(x, level));
}
public tileYToLatitude(y: number, level: number) {
return this.yFractionToLatitude(this.tileYToFraction(y, level));
}
/**
* Gets the fraction of the normalized (0-1) coordinates with at left, bottom.
*
* @param x column
* @param y row
* @param level depth
* @param result result (0-1 from left, bottom)
*/
public tileXYToFraction(x: number, y: number, level: number, result?: Point2d): Point2d {
if (undefined === result)
result = Point2d.createZero();
result.x = this.tileXToFraction(x, level);
result.y = this.tileYToFraction(y, level);
return result;
}
/**
*
* @param x column
* @param y row
* @param level depth
* @param result result longitude, latitude.
* @param height height (optional)
*/
public tileXYToCartographic(x: number, y: number, level: number, result: Cartographic, height?: number): Cartographic {
this.tileXYToFraction(x, y, level, this._scratchFraction);
return this.fractionToCartographic(this._scratchFraction.x, this._scratchFraction.y, result, height);
}
public tileXYToRectangle(x: number, y: number, level: number, result?: MapTileRectangle) {
return MapTileRectangle.create(this.tileXToLongitude(x, level), this.tileYToLatitude(y, level), this.tileXToLongitude(x + 1, level), this.tileYToLatitude(y + 1, level), result);
}
/**
*
* @param xFraction
* @param yFraction
* @param result
* @param height
*/
public fractionToCartographic(xFraction: number, yFraction: number, result: Cartographic, height?: number): Cartographic {
result.longitude = this.xFractionToLongitude(xFraction);
result.latitude = this.yFractionToLatitude(yFraction);
result.height = undefined === height ? 0.0 : height;
return result;
}
public cartographicToFraction(latitudeRadians: number, longitudeRadians: number, result: Point2d): Point2d {
result.x = this.longitudeToXFraction(longitudeRadians);
result.y = this.latitudeToYFraction(latitudeRadians);
return result;
}
// gets the longitude and latitude into a point with coordinates between 0 and 1
public ecefToPixelFraction(point: Point3d): Point3d {
const cartoGraphic = Cartographic.fromEcef(point)!;
return Point3d.create(this.longitudeToXFraction(cartoGraphic.longitude), this.latitudeToYFraction(cartoGraphic.latitude), 0.0);
}
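/** Computes the transform from normalized mercator fractions to iModel (db) coordinates by projecting
 * three points near the project center through the iModel's ECEF location, honoring the ground bias.
 */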
public computeMercatorFractionToDb(iModel: IModelConnection, groundBias: number): Transform {
const ecefLocation: EcefLocation = iModel.ecefLocation!;
const dbToEcef = ecefLocation.getTransform();
const projectCenter = Point3d.create(iModel.projectExtents.center.x, iModel.projectExtents.center.y, groundBias);
const projectEast = Point3d.create(projectCenter.x + 1.0, projectCenter.y, groundBias);
const projectNorth = Point3d.create(projectCenter.x, projectCenter.y + 1.0, groundBias);
const mercatorOrigin = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectCenter));
const mercatorX = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectEast));
const mercatorY = this.ecefToPixelFraction(dbToEcef.multiplyPoint3d(projectNorth));
const deltaX = Vector3d.createStartEnd(mercatorOrigin, mercatorX);
const deltaY = Vector3d.createStartEnd(mercatorOrigin, mercatorY);
const dbToMercator = Transform.createOriginAndMatrixColumns(mercatorOrigin, deltaX, deltaY, Vector3d.create(0.0, 0.0, 1.0)).multiplyTransformTransform(Transform.createTranslationXYZ(-projectCenter.x, -projectCenter.y, -groundBias));
return dbToMercator.inverse() as Transform;
}
}
/** @internal */
export class GeographicTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 1, rowZeroAtTop: boolean = false) {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
}
public yFractionToLatitude(yFraction: number): number {
return Math.PI * (yFraction - .5);
}
public latitudeToYFraction(latitude: number): number {
return .5 + latitude / Math.PI;
}
}
/** @internal */
export class WebMercatorTilingScheme extends MapTilingScheme {
public constructor(numberOfLevelZeroTilesX: number = 2, numberOfLevelZeroTilesY: number = 2, rowZeroAtTop: boolean = false) |
public yFractionToLatitude(yFraction: number): number {
const mercatorAngle = Angle.pi2Radians * (yFraction - .5);
return Angle.piOver2Radians - (2.0 * Math.atan(Math.exp(mercatorAngle)));
}
public latitudeToYFraction(latitude: number): number {
const sinLatitude = Math.sin(latitude);
return (0.5 - Math.log((1.0 + sinLatitude) / (1.0 - sinLatitude)) / (4.0 * Angle.piRadians)); // https://msdn.microsoft.com/en-us/library/bb259689.aspx
}
}
| {
super(numberOfLevelZeroTilesX, numberOfLevelZeroTilesY, rowZeroAtTop);
} | identifier_body |
exceptions.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.errorcodes import ErrorCode
from common.exceptions import BaseAPIError
from django.utils.translation import ugettext_lazy as _
| MODULE_CODE = ErrorCode.BKDATA_AUTH
class AuthCode:
# Error code mapping table: entries are currently 3-tuples of (error code, description, optional remedy)
SDK_PERMISSION_DENIED_ERR = ("001", _("资源访问权限不足"))
SDK_PARAM_MISS_ERR = ("002", _("认证参数缺失"), _("请查看API请求文档"))
SDK_AUTHENTICATION_ERR = ("003", _("认证不通过,请提供合法的 BKData 认证信息"), _("请查看API请求文档"))
SDK_INVALID_SECRET_ERR = (
"004",
_("内部模块调用请传递准确的 bk_app_code 和 bk_app_secret"),
_("传递给的变量需要与 dataapi_settings.py 保持一致"),
)
SDK_INVALID_TOKEN_ERR = ("005", _("数据平台授权码不正确"), _("前往平台授权码页面查看授权码"))
SDK_NO_INDENTITY_ERR = ("007", _("未检测到有效的认证信息"), _("请查看API请求文档"))
SDK_WRONG_INDENTIRY_ERR = ("008", _("错误的认证方式"), _("请查看API请求文档"))
SDK_JWT_VERIFY_ERR = ("009", _("ESB 传递的 JWT 字符串解析失败"))
# Ticket errors
NO_PERMISSION_ERR = ("101", _("当前用户无单据权限"))
TICKET_STATE_HAS_BEEN_OPERATED_ERR = ("102", _("当前单据状态已审批"))
APPROVAL_RANGE_ERR = ("103", _("审批范围错误"))
NOT_IN_APPROVAL_PROCESS_ERR = ("104", _("未在可审批阶段"))
NO_UPDATE_PERMISSION_ERR = ("105", _("无更新权限"))
OBJECT_CLASS_ERR = ("106", _("object类型错误"))
SUBJECT_CHECK_ERR = ("107", _("主体校验失败"))
SCOPE_CHECK_ERR = ("108", _("申请范围校验失败"))
HAS_ALREADY_EXISTS_ERR = ("109", _("已提交过此权限申请,请勿重复提交"))
OBJECT_NOT_EXIST_ERR = ("110", _("object不存在"))
ACTION_CHECK_ERR = ("111", _("action校验错误"))
NO_PROCESSOR_ERR = ("112", _("没有审批人"))
UNEXPECTED_TICKET_TYPE = ("113", _("单据类型校验错误"))
REPEAT_PERMISSION_INFO = ("114", _("重复的权限信息"))
ROLE_AT_LEAST_ONE_ERR = ("115", _("该角色成员不能为空"))
PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR = ("116", _("申请的权限对象不存在"))
PROJECT_DATA_VALID_ERR = ("117", _("项目所申请的业务数据的区域标签不合法"))
NO_MATCHED_TICKET_TYPE_PROCESS_ERR = ("118", _("没有匹配的单据类型的审批流程"))
TICKET_CALLBACK_ERR = ("119", _("通用单据回调失败"))
NOTICE_APPROVE_CALLBACK_ERR = ("120", _("单据通知审核回调异常"))
NOT_EXIST_ERR = ("120", _("实例不存在"))
NOT_OBJECT_CLASS_SAME_WITH_META = ("121", _("不存在与 Meta 映射的对象类型"))
# Data token errors
TOKEN_NOT_EXIST_ERR = ("201", _("授权码不存在"))
TOKEN_NOT_AUTHORIZED = ("202", _("授权码未完全通过审批"))
TOKEN_EXPIRED_ERR = ("203", _("授权码已过期"))
DATA_SCOPE_FORMAT_ERR = ("204", _("数据范围格式错误"))
DATA_SCOPE_VALID_ERR = ("205", _("不合法的对象范围"))
TOKEN_DISABLED_ERR = ("206", _("授权码已被禁止使用"))
# Resource relationship errors
QUERY_TOO_MANY_RESOURCE_ERR = ("401", _("查询太多资源,拒绝返回内容"))
INVALID_PARENT_RESOURCE_TYPE_ERR = ("402", _("非法的父级资源类型"))
INVALID_RESOURCE_ATTR_ERR = ("403", _("非法的资源属性"))
FILTER_NOT_TO_SCOPE_ERR = ("404", _("资源过滤器无法装换为属性范围"))
# IAM integration errors
BKIAM_NOT_SUPPOR_ATTR_AUTHO = ("501", _("BKIAM 不支持通过接口进行属性授权"))
BKIAM_CONFIGURE_RESTRICTION = ("502", _("BKIAM 配置限制"))
BKIAM_SYNC_AUTHO_ERR = ("503", _("BKIAM 同步失败"))
BKIAM_POLICIES_COUNT_LIMIT_ERR = ("504", _("BKIAM 策略数上限溢出"))
# BaseModel errors
CORE_BASE_MODEL_NO_PK = ("601", _("基础模型配置不存在PK"))
CORE_BASE_MODEL_INSTANCE_NOT_EXIT = ("602", _("基础模型实例不存在"))
# Others
PERMISSION_DENIED_ERR = ("705", _("角色访问权限不足"))
NO_FUNCTION_ERR = ("706", _("功能还未实现"))
APP_NOT_MATCH_ERR = ("707", _("非法APP发起访问,请检查 APP_CODE"))
OUTER_MODEL_ATTR_ERR = ("708", _("存在不符合预期的 Model 数据"))
PARAM_ERR = ("709", _("参数校验错误"))
REDIS_CONNECT_ERR = ("710", _("无法连接REDIS服务"), _("检查AuthAPI依赖redis服务是否正常"))
OBJECT_SERIALIZER_NO_PK_ERR = ("711", _("ObjectClass 类型定义缺少对主键定义"))
UPDATE_ROLE_ERR = ("712", _("更新角色成员列表错误"))
# ITSM
TICKET_CREATE_ERROR = ("801", _("当前有未完成的单据"))
CALL_BACK_ERROR = ("802", _("回调第三方模块发生错误"))
ITSM_CATALOGS_NOT_EXIST = ("803", _("未在itsm找到相关的目录"))
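# Concrete exception classes: each one binds a (CODE, MESSAGE) pair defined in AuthCode above.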
class PermissionDeniedError(AuthAPIError):
CODE = AuthCode.PERMISSION_DENIED_ERR[0]
MESSAGE = AuthCode.PERMISSION_DENIED_ERR[1]
class NoPermissionError(AuthAPIError):
CODE = AuthCode.NO_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_PERMISSION_ERR[1]
class TicketStateHasBeenOperatedError(AuthAPIError):
CODE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[0]
MESSAGE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[1]
class ApprovalRangeError(AuthAPIError):
CODE = AuthCode.APPROVAL_RANGE_ERR[0]
MESSAGE = AuthCode.APPROVAL_RANGE_ERR[1]
class NotInApprovalProcessError(AuthAPIError):
CODE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[0]
MESSAGE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[1]
class NoUpdatePermissionError(AuthAPIError):
CODE = AuthCode.NO_UPDATE_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_UPDATE_PERMISSION_ERR[1]
class ObjectClassError(AuthAPIError):
CODE = AuthCode.OBJECT_CLASS_ERR[0]
MESSAGE = AuthCode.OBJECT_CLASS_ERR[1]
class SubjectCheckErr(AuthAPIError):
CODE = AuthCode.SUBJECT_CHECK_ERR[0]
MESSAGE = AuthCode.SUBJECT_CHECK_ERR[1]
class UnexpectedTicketTypeErr(AuthAPIError):
CODE = AuthCode.UNEXPECTED_TICKET_TYPE[0]
MESSAGE = AuthCode.UNEXPECTED_TICKET_TYPE[1]
class ScopeCheckErr(AuthAPIError):
CODE = AuthCode.SCOPE_CHECK_ERR[0]
MESSAGE = AuthCode.SCOPE_CHECK_ERR[1]
class NoFunctionErr(AuthAPIError):
CODE = AuthCode.NO_FUNCTION_ERR[0]
MESSAGE = AuthCode.NO_FUNCTION_ERR[1]
class HasAlreadyExistsErr(AuthAPIError):
CODE = AuthCode.HAS_ALREADY_EXISTS_ERR[0]
MESSAGE = AuthCode.HAS_ALREADY_EXISTS_ERR[1]
class ObjectNotExistsErr(AuthAPIError):
CODE = AuthCode.OBJECT_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.OBJECT_NOT_EXIST_ERR[1]
class ActionCheckErr(AuthAPIError):
CODE = AuthCode.ACTION_CHECK_ERR[0]
MESSAGE = AuthCode.ACTION_CHECK_ERR[1]
class NoProcessorErr(AuthAPIError):
CODE = AuthCode.NO_PROCESSOR_ERR[0]
MESSAGE = AuthCode.NO_PROCESSOR_ERR[1]
class RepeatPermissionErr(AuthAPIError):
CODE = AuthCode.REPEAT_PERMISSION_INFO[0]
MESSAGE = AuthCode.REPEAT_PERMISSION_INFO[1]
class RoleAtLeastOneErr(AuthAPIError):
CODE = AuthCode.ROLE_AT_LEAST_ONE_ERR[0]
MESSAGE = AuthCode.ROLE_AT_LEAST_ONE_ERR[1]
class TokenNotExistErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.TOKEN_NOT_EXIST_ERR[1]
class TokenNotAuthorizedErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_AUTHORIZED[0]
MESSAGE = AuthCode.TOKEN_NOT_AUTHORIZED[1]
class TokenExpiredErr(AuthAPIError):
CODE = AuthCode.TOKEN_EXPIRED_ERR[0]
MESSAGE = AuthCode.TOKEN_EXPIRED_ERR[1]
class DataScopeFormatErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_FORMAT_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_FORMAT_ERR[1]
class DataScopeValidErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_VALID_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_VALID_ERR[1]
class AppNotMatchErr(AuthAPIError):
CODE = AuthCode.APP_NOT_MATCH_ERR[0]
MESSAGE = AuthCode.APP_NOT_MATCH_ERR[1]
class PermissionObjectDoseNotExistError(AuthAPIError):
CODE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[1]
class TokenDisabledErr(AuthAPIError):
CODE = AuthCode.TOKEN_DISABLED_ERR[0]
MESSAGE = AuthCode.TOKEN_DISABLED_ERR[1]
class OuterModelAttrErr(AuthAPIError):
CODE = AuthCode.OUTER_MODEL_ATTR_ERR[0]
MESSAGE = AuthCode.OUTER_MODEL_ATTR_ERR[1]
class ProjectDataTagValidErr(AuthAPIError):
CODE = AuthCode.PROJECT_DATA_VALID_ERR[0]
MESSAGE = AuthCode.PROJECT_DATA_VALID_ERR[1]
class NoMatchedTicketTypeProcessErr(AuthAPIError):
CODE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[0]
MESSAGE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[1]
class TicketCallbackErr(AuthAPIError):
CODE = AuthCode.TICKET_CALLBACK_ERR[0]
MESSAGE = AuthCode.TICKET_CALLBACK_ERR[1]
class NoticeApproveCallbackErr(AuthAPIError):
CODE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[0]
MESSAGE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[1]
class NotExistErr(AuthAPIError):
CODE = AuthCode.NOT_EXIST_ERR[0]
MESSAGE = AuthCode.NOT_EXIST_ERR[1]
class NotObjectClassSameWithMeta(AuthAPIError):
CODE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[0]
MESSAGE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[1]
class ParameterErr(AuthAPIError):
CODE = AuthCode.PARAM_ERR[0]
MESSAGE = AuthCode.PARAM_ERR[1]
class RedisConnectError(AuthAPIError):
CODE = AuthCode.REDIS_CONNECT_ERR[0]
MESSAGE = AuthCode.REDIS_CONNECT_ERR[1]
class ObjectSerilizerNoPKErr(AuthAPIError):
CODE = AuthCode.OBJECT_SERIALIZER_NO_PK_ERR[0]
MESSAGE = AuthCode.OBJECT_SERIALIZER_NO_PK_ERR[1]
class UpdateRoleErr(AuthAPIError):
CODE = AuthCode.UPDATE_ROLE_ERR[0]
MESSAGE = AuthCode.UPDATE_ROLE_ERR[1]
class QueryTooManyResourceErr(AuthAPIError):
CODE = AuthCode.QUERY_TOO_MANY_RESOURCE_ERR[0]
MESSAGE = AuthCode.QUERY_TOO_MANY_RESOURCE_ERR[1]
class InvalidParentResourceTypeErr(AuthAPIError):
CODE = AuthCode.INVALID_PARENT_RESOURCE_TYPE_ERR[0]
MESSAGE = AuthCode.INVALID_PARENT_RESOURCE_TYPE_ERR[1]
class InvalidResourceAttrErr(AuthAPIError):
CODE = AuthCode.INVALID_RESOURCE_ATTR_ERR[0]
MESSAGE = AuthCode.INVALID_RESOURCE_ATTR_ERR[1]
class FilterNotToScopeErr(AuthAPIError):
CODE = AuthCode.FILTER_NOT_TO_SCOPE_ERR[0]
MESSAGE = AuthCode.FILTER_NOT_TO_SCOPE_ERR[1]
class BKIAMNotSupportAttrAutho(AuthAPIError):
CODE = AuthCode.BKIAM_NOT_SUPPOR_ATTR_AUTHO[0]
MESSAGE = AuthCode.BKIAM_NOT_SUPPOR_ATTR_AUTHO[1]
class BKIAMConfigureRestrictionErr(AuthAPIError):
CODE = AuthCode.BKIAM_CONFIGURE_RESTRICTION[0]
MESSAGE = AuthCode.BKIAM_CONFIGURE_RESTRICTION[1]
class BKIAMSyncAuthoErr(AuthAPIError):
CODE = AuthCode.BKIAM_SYNC_AUTHO_ERR[0]
MESSAGE = AuthCode.BKIAM_SYNC_AUTHO_ERR[1]
class BKIAMPolicesCountLimitErr(AuthAPIError):
CODE = AuthCode.BKIAM_POLICIES_COUNT_LIMIT_ERR[0]
MESSAGE = AuthCode.BKIAM_POLICIES_COUNT_LIMIT_ERR[1]
class CoreBaseModelNoPK(AuthAPIError):
CODE = AuthCode.CORE_BASE_MODEL_NO_PK[0]
MESSAGE = AuthCode.CORE_BASE_MODEL_NO_PK[1]
class CoreBaseModelInstanceNotExist(AuthAPIError):
CODE = AuthCode.CORE_BASE_MODEL_INSTANCE_NOT_EXIT[0]
MESSAGE = AuthCode.CORE_BASE_MODEL_INSTANCE_NOT_EXIT[1]
class TicketCreateError(AuthAPIError):
CODE = AuthCode.TICKET_CREATE_ERROR[0]
MESSAGE = AuthCode.TICKET_CREATE_ERROR[1]
class CallBackError(AuthAPIError):
CODE = AuthCode.CALL_BACK_ERROR[0]
MESSAGE = AuthCode.CALL_BACK_ERROR[1]
class ItsmCatalogsNotExist(AuthAPIError):
CODE = AuthCode.ITSM_CATALOGS_NOT_EXIST[0]
MESSAGE = AuthCode.ITSM_CATALOGS_NOT_EXIST[1] | class AuthAPIError(BaseAPIError): | random_line_split |
exceptions.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.errorcodes import ErrorCode
from common.exceptions import BaseAPIError
from django.utils.translation import ugettext_lazy as _
class AuthAPIError(BaseAPIError):
MODULE_CODE = ErrorCode.BKDATA_AUTH
class AuthCode:
# 错误码映射表,目前以三元组定义,包含了错误码、描述和处理方式(可选)
SDK_PERMISSION_DENIED_ERR = ("001", _("资源访问权限不足"))
SDK_PARAM_MISS_ERR = ("002", _("认证参数缺失"), _("请查看API请求文档"))
SDK_AUTHENTICATION_ERR = ("003", _("认证不通过,请提供合法的 BKData 认证信息"), _("请查看API请求文档"))
SDK_INVALID_SECRET_ERR = (
"004",
_("内部模块调用请传递准确的 bk_app_code 和 bk_app_secret"),
_("传递给的变量需要与 dataapi_settings.py 保持一致"),
)
SDK_INVALID_TOKEN_ERR = ("005", _("数据平台授权码不正确"), _("前往平台授权码页面查看授权码"))
SDK_NO_INDENTITY_ERR = ("007", _("未检测到有效的认证信息"), _("请查看API请求文档"))
SDK_WRONG_INDENTIRY_ERR = ("008", _("错误的认证方式"), _("请查看API请求文档"))
SDK_JWT_VERIFY_ERR = ("009", _("ESB 传递的 JWT 字符串解析失败"))
# 单据异常
NO_PERMISSION_ERR = ("101", _("当前用户无单据权限"))
TICKET_STATE_HAS_BEEN_OPERATED_ERR = ("102", _("当前单据状态已审批"))
APPROVAL_RANGE_ERR = ("103", _("审批范围错误"))
NOT_IN_APPROVAL_PROCESS_ERR = ("104", _("未在可审批阶段"))
NO_UPDATE_PERMISSION_ERR = ("105", _("无更新权限"))
OBJECT_CLASS_ERR = ("106", _("object类型错误"))
SUBJECT_CHECK_ERR = ("107", _("主体校验失败"))
SCOPE_CHECK_ERR = ("108", _("申请范围校验失败"))
HAS_ALREADY_EXISTS_ERR = ("109", _("已提交过此权限申请,请勿重复提交"))
OBJECT_NOT_EXIST_ERR = ("110", _("object不存在"))
ACTION_CHECK_ERR = ("111", _("action校验错误"))
NO_PROCESSOR_ERR = ("112", _("没有审批人"))
UNEXPECTED_TICKET_TYPE = ("113", _("单据类型校验错误"))
REPEAT_PERMISSION_INFO = ("114", _("重复的权限信息"))
ROLE_AT_LEAST_ONE_ERR = ("115", _("该角色成员不能为空"))
PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR = ("116", _("申请的权限对象不存在"))
PROJECT_DATA_VALID_ERR = ("117", _("项目所申请的业务数据的区域标签不合法"))
NO_MATCHED_TICKET_TYPE_PROCESS_ERR = ("118", _("没有匹配的单据类型的审批流程"))
TICKET_CALLBACK_ERR = ("119", _("通用单据回调失败"))
NOTICE_APPROVE_CALLBACK_ERR = ("120", _("单据通知审核回调异常"))
NOT_EXIST_ERR = ("120", _("实例不存在"))
NOT_OBJECT_CLASS_SAME_WITH_META = ("121", _("不存在与 Meta 映射的对象类型"))
# data token 异常
TOKEN_NOT_EXIST_ERR = ("201", _("授权码不存在"))
TOKEN_NOT_AUTHORIZED = ("202", _("授权码未完全通过审批"))
TOKEN_EXPIRED_ERR = ("203", _("授权码已过期"))
DATA_SCOPE_FORMAT_ERR = ("204", _("数据范围格式错误"))
DATA_SCOPE_VALID_ERR = ("205", _("不合法的对象范围"))
TOKEN_DISABLED_ERR = ("206", _("授权码已被禁止使用"))
# 资源关系异常
QUERY_TOO_MANY_RESOURCE_ERR = ("401", _("查询太多资源,拒绝返回内容"))
INVALID_PARENT_RESOURCE_TYPE_ERR = ("402", _("非法的父级资源类型"))
INVALID_RESOURCE_ATTR_ERR = ("403", _("非法的资源属性"))
    FILTER_NOT_TO_SCOPE_ERR = ("404", _("资源过滤器无法转换为属性范围"))
# IAM 关联异常
BKIAM_NOT_SUPPOR_ATTR_AUTHO = ("501", _("BKIAM 不支持通过接口进行属性授权"))
BKIAM_CONFIGURE_RESTRICTION = ("502", _("BKIAM 配置限制"))
BKIAM_SYNC_AUTHO_ERR = ("503", _("BKIAM 同步失败"))
BKIAM_POLICIES_COUNT_LIMIT_ERR = ("504", _("BKIAM 策略数上限溢出"))
# BaseModel 异常
CORE_BASE_MODEL_NO_PK = ("601", _("基础模型配置不存在PK"))
CORE_BASE_MODEL_INSTANCE_NOT_EXIT = ("602", _("基础模型实例不存在"))
# Others
PERMISSION_DENIED_ERR = ("705", _("角色访问权限不足"))
NO_FUNCTION_ERR = ("706", _("功能还未实现"))
APP_NOT_MATCH_ERR = ("707", _("非法APP发起访问,请检查 APP_CODE"))
OUTER_MODEL_ATTR_ERR = ("708", _("存在不符合预期的 Model 数据"))
PARAM_ERR = ("709", _("参数校验错误"))
REDIS_CONNECT_ERR = ("710", _("无法连接REDIS服务"), _("检查AuthAPI依赖redis服务是否正常"))
    OBJECT_SERIALIZER_NO_PK_ERR = ("711", _("ObjectClass 类型定义缺少对主键的定义"))
UPDATE_ROLE_ERR = ("712", _("更新角色成员列表错误"))
# ITSM
TICKET_CREATE_ERROR = ("801", _("当前有未完成的单据"))
CALL_BACK_ERROR = ("802", _("回调第三方模块发生错误"))
ITSM_CATALOGS_NOT_EXIST = ("803", _("未在itsm找到相关的目录"))
class PermissionDeniedError(AuthAPIError):
CODE = AuthCode.PERMISSION_DENIED_ERR[0]
MESSAGE = AuthCode.PERMISSION_DENIED_ERR[1]
class NoPermissionError(AuthAPIError):
CODE = AuthCode.NO_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_PERMISSION_ERR[1]
class TicketStateHasBeenOperatedError(AuthAPIError):
CODE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[0]
MESSAGE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[1]
class ApprovalRangeError(AuthAPIError):
CODE = AuthCode.APPROVAL_RANGE_ERR[0]
MESSAGE = AuthCode.APPROVAL_RANGE_ERR[1]
class NotInApprovalProcessError(AuthAPIError):
CODE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[0]
MESSAGE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[1]
class NoUpdatePermissionError(AuthAPIError):
CODE = AuthCode.NO_UPDATE_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_UPDATE_PERMISSION_ERR[1]
class ObjectClassError(AuthAPIError):
CODE = AuthCode.OBJECT_CLASS_ERR[0]
MESSAGE = AuthCode.OBJECT_CLASS_ERR[1]
class SubjectCheckErr(AuthAPIError):
CODE = AuthCode.SUBJECT_CHECK_ERR[0]
MESSAGE = AuthCode.SUBJECT_CHECK_ERR[1]
class UnexpectedTicketTypeErr(AuthAPIError):
CODE = AuthCode.UNEXPECTED_TICKET_TYPE[0]
MESSAGE = AuthCode.UNEXPECTED_TICKET_TYPE[1]
class ScopeCheckErr(AuthAPIError):
CODE = AuthCode.SCOPE_CHECK_ERR[0]
MESSAGE = AuthCode.SCOPE_CHECK_ERR[1]
class NoFunctionErr(AuthAPIError):
CODE = AuthCode.NO_FUNCTION_ERR[0]
MESSAGE = AuthCode.NO_FUNCTION_ERR[1]
class HasAlreadyExistsErr(AuthAPIError):
CODE = AuthCode.HAS_ALREADY_EXISTS_ERR[0]
MESSAGE = AuthCode.HAS_ALREADY_EXISTS_ERR[1]
class ObjectNotExistsErr(AuthAPIError):
CODE = AuthCode.OBJECT_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.OBJECT_NOT_EXIST_ERR[1]
class ActionCheckErr(AuthAPIError):
CODE = AuthCode.ACTION_CHECK_ERR[0]
MESSAGE = AuthCode.ACTION_CHECK_ERR[1]
class NoProcessorErr(AuthAPIError):
CODE = AuthCode.NO_PROCESSOR_ERR[0]
MESSAGE = AuthCode.NO_PROCESSOR_ERR[1]
class RepeatPermissionErr(AuthAPIError):
CODE = AuthCode.REPEAT_PERMISSION_INFO[0]
MESSAGE = AuthCode.REPEAT_PERMISSION_INFO[1]
class RoleAtLeastOneErr(AuthAPIError):
CODE = AuthCode.ROLE_AT_LEAST_ONE_ERR[0]
MESSAGE = AuthCode.ROLE_AT_LEAST_ONE_ERR[1]
class TokenNotExistErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.TOKEN_NOT_EXIST_ERR[1]
class TokenNotAuthorizedErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_AUTHORIZED[0]
MESSAGE = AuthCode.TOKEN_NOT_AUTHORIZED[1]
class TokenExpiredErr(AuthAPIError):
CODE = AuthCode.TOKEN_EXPIRED_ERR[0]
MESSAGE = AuthCode.TOKEN_EXPIRED_ERR[1]
class DataScopeFormatErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_FORMAT_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_FORMAT_ERR[1]
class DataScopeValidErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_VALID_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_VALID_ERR[1]
class AppNotMatchErr(AuthAPIError):
CODE = AuthCode.APP_NOT_MATCH_ERR[0]
MESSAGE = AuthCode.APP_NOT_MATCH_ERR[1]
class PermissionObjectDoseNotExistError(AuthAPIError):
CODE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[1]
class TokenDisabledErr(AuthAPIError):
CODE = AuthCode.TOKEN_DISABLED_ERR[0]
MESSAGE = AuthCode.TOKEN_DISABLED_ERR[1]
class OuterModelAttrErr(AuthAPIError):
CODE = AuthCode.OUTER_MODEL_ATTR_ERR[0]
MESSAGE = AuthCode.OUTER_MODEL_ATTR_ERR[1]
class ProjectDataTagValidErr(AuthAPIError):
CODE = AuthCode.PROJECT_DATA_VALID_ERR[0]
MESSAGE = AuthCode.PROJECT_DATA_VALID_ERR[1]
class NoMatchedTicketTypeProcessErr(AuthAPIError):
CODE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[0]
MESSAGE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[1]
class TicketCallbackErr(AuthAPIError):
CODE = AuthCode.TICKET_CALLBACK_ERR[0]
MESSAGE = AuthCode.TICKET_CALLBACK_ERR[1]
class NoticeApproveCallbackErr(AuthAPIError):
CODE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[0]
MESSAGE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[1]
class NotExistErr(AuthAPIError):
CODE = AuthCode.NOT_EXIST_ERR[0]
MESSAGE = AuthCode.NOT_EXIST_ERR[1]
class NotObjectClassSameWithMeta(AuthAPIError):
CODE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[0]
MESSAGE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[1]
class ParameterErr(AuthAPIError):
CODE = AuthCode.PARAM_ERR[0]
MESSAGE = AuthCode.PARAM_ERR[1]
class RedisConnectError(AuthAPI | = AuthCode.REDIS_CONNECT_ERR[0]
MESSAGE = AuthCode.REDIS_CONNECT_ERR[1]
class ObjectSerilizerNoPKErr(AuthAPIError):
CODE = AuthCode.OBJECT_SERIALIZER_NO_PK_ERR[0]
MESSAGE = AuthCode.OBJECT_SERIALIZER_NO_PK_ERR[1]
class UpdateRoleErr(AuthAPIError):
CODE = AuthCode.UPDATE_ROLE_ERR[0]
MESSAGE = AuthCode.UPDATE_ROLE_ERR[1]
class QueryTooManyResourceErr(AuthAPIError):
CODE = AuthCode.QUERY_TOO_MANY_RESOURCE_ERR[0]
MESSAGE = AuthCode.QUERY_TOO_MANY_RESOURCE_ERR[1]
class InvalidParentResourceTypeErr(AuthAPIError):
CODE = AuthCode.INVALID_PARENT_RESOURCE_TYPE_ERR[0]
MESSAGE = AuthCode.INVALID_PARENT_RESOURCE_TYPE_ERR[1]
class InvalidResourceAttrErr(AuthAPIError):
CODE = AuthCode.INVALID_RESOURCE_ATTR_ERR[0]
MESSAGE = AuthCode.INVALID_RESOURCE_ATTR_ERR[1]
class FilterNotToScopeErr(AuthAPIError):
CODE = AuthCode.FILTER_NOT_TO_SCOPE_ERR[0]
MESSAGE = AuthCode.FILTER_NOT_TO_SCOPE_ERR[1]
class BKIAMNotSupportAttrAutho(AuthAPIError):
CODE = AuthCode.BKIAM_NOT_SUPPOR_ATTR_AUTHO[0]
MESSAGE = AuthCode.BKIAM_NOT_SUPPOR_ATTR_AUTHO[1]
class BKIAMConfigureRestrictionErr(AuthAPIError):
CODE = AuthCode.BKIAM_CONFIGURE_RESTRICTION[0]
MESSAGE = AuthCode.BKIAM_CONFIGURE_RESTRICTION[1]
class BKIAMSyncAuthoErr(AuthAPIError):
CODE = AuthCode.BKIAM_SYNC_AUTHO_ERR[0]
MESSAGE = AuthCode.BKIAM_SYNC_AUTHO_ERR[1]
class BKIAMPolicesCountLimitErr(AuthAPIError):
CODE = AuthCode.BKIAM_POLICIES_COUNT_LIMIT_ERR[0]
MESSAGE = AuthCode.BKIAM_POLICIES_COUNT_LIMIT_ERR[1]
class CoreBaseModelNoPK(AuthAPIError):
CODE = AuthCode.CORE_BASE_MODEL_NO_PK[0]
MESSAGE = AuthCode.CORE_BASE_MODEL_NO_PK[1]
class CoreBaseModelInstanceNotExist(AuthAPIError):
CODE = AuthCode.CORE_BASE_MODEL_INSTANCE_NOT_EXIT[0]
MESSAGE = AuthCode.CORE_BASE_MODEL_INSTANCE_NOT_EXIT[1]
class TicketCreateError(AuthAPIError):
CODE = AuthCode.TICKET_CREATE_ERROR[0]
MESSAGE = AuthCode.TICKET_CREATE_ERROR[1]
class CallBackError(AuthAPIError):
CODE = AuthCode.CALL_BACK_ERROR[0]
MESSAGE = AuthCode.CALL_BACK_ERROR[1]
class ItsmCatalogsNotExist(AuthAPIError):
CODE = AuthCode.ITSM_CATALOGS_NOT_EXIST[0]
MESSAGE = AuthCode.ITSM_CATALOGS_NOT_EXIST[1]
| Error):
CODE | identifier_name |
exceptions.py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.errorcodes import ErrorCode
from common.exceptions import BaseAPIError
from django.utils.translation import ugettext_lazy as _
class AuthAPIError(BaseAPIError):
MODULE_CODE = ErrorCode.BKDATA_AUTH
class AuthCode:
# 错误码映射表,目前以三元组定义,包含了错误码、描述和处理方式(可选)
SDK_PERMISSION_DENIED_ERR = ("001", _("资源访问权限不足"))
SDK_PARAM_MISS_ERR = ("002", _("认证参数缺失"), _("请查看API请求文档"))
SDK_AUTHENTICATION_ERR = ("003", _("认证不通过,请提供合法的 BKData 认证信息"), _("请查看API请求文档"))
SDK_INVALID_SECRET_ERR = (
"004",
_("内部模块调用请传递准确的 bk_app_code 和 bk_app_secret"),
_("传递给的变量需要与 dataapi_settings.py 保持一致"),
)
SDK_INVALID_TOKEN_ERR = ("005", _("数据平台授权码不正确"), _("前往平台授权码页面查看授权码"))
SDK_NO_INDENTITY_ERR = ("007", _("未检测到有效的认证信息"), _("请查看API请求文档"))
SDK_WRONG_INDENTIRY_ERR = ("008", _("错误的认证方式"), _("请查看API请求文档"))
SDK_JWT_VERIFY_ERR = ("009", _("ESB 传递的 JWT 字符串解析失败"))
# 单据异常
NO_PERMISSION_ERR = ("101", _("当前用户无单据权限"))
TICKET_STATE_HAS_BEEN_OPERATED_ERR = ("102", _("当前单据状态已审批"))
APPROVAL_RANGE_ERR = ("103", _("审批范围错误"))
NOT_IN_APPROVAL_PROCESS_ERR = ("104", _("未在可审批阶段"))
NO_UPDATE_PERMISSION_ERR = ("105", _("无更新权限"))
OBJECT_CLASS_ERR = ("106", _("object类型错误"))
SUBJECT_CHECK_ERR = ("107", _("主体校验失败"))
SCOPE_CHECK_ERR = ("108", _("申请范围校验失败"))
HAS_ALREADY_EXISTS_ERR = ("109", _("已提交过此权限申请,请勿重复提交"))
OBJECT_NOT_EXIST_ERR = ("110", _("object不存在"))
ACTION_CHECK_ERR = ("111", _("action校验错误"))
NO_PROCESSOR_ERR = ("112", _("没有审批人"))
UNEXPECTED_TICKET_TYPE = ("113", _("单据类型校验错误"))
REPEAT_PERMISSION_INFO = ("114", _("重复的权限信息"))
ROLE_AT_LEAST_ONE_ERR = ("115", _("该角色成员不能为空"))
PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR = ("116", _("申请的权限对象不存在"))
PROJECT_DATA_VALID_ERR = ("117", _("项目所申请的业务数据的区域标签不合法"))
NO_MATCHED_TICKET_TYPE_PROCESS_ERR = ("118", _("没有匹配的单据类型的审批流程"))
TICKET_CALLBACK_ERR = ("119", _("通用单据回调失败"))
NOTICE_APPROVE_CALLBACK_ERR = ("120", _("单据通知审核回调异常"))
NOT_EXIST_ERR = ("120", _("实例不存在"))
NOT_OBJECT_CLASS_SAME_WITH_META = ("121", _("不存在与 Meta 映射的对象类型"))
# data token 异常
TOKEN_NOT_EXIST_ERR = ("201", _("授权码不存在"))
TOKEN_NOT_AUTHORIZED = ("202", _("授权码未完全通过审批"))
TOKEN_EXPIRED_ERR = ("203", _("授权码已过期"))
DATA_SCOPE_FORMAT_ERR = ("204", _("数据范围格式错误"))
DATA_SCOPE_VALID_ERR = ("205", _("不合法的对象范围"))
TOKEN_DISABLED_ERR = ("206", _("授权码已被禁止使用"))
# 资源关系异常
QUERY_TOO_MANY_RESOURCE_ERR = ("401", _("查询太多资源,拒绝返回内容"))
INVALID_PARENT_RESOURCE_TYPE_ERR = ("402", _("非法的父级资源类型"))
INVALID_RESOURCE_ATTR_ERR = ("403", _("非法的资源属性"))
    FILTER_NOT_TO_SCOPE_ERR = ("404", _("资源过滤器无法转换为属性范围"))
# IAM 关联异常
BKIAM_NOT_SUPPOR_ATTR_AUTHO = ("501", _("BKIAM 不支持通过接口进行属性授权"))
BKIAM_CONFIGURE_RESTRICTION = ("502", _("BKIAM 配置限制"))
BKIAM_SYNC_AUTHO_ERR = ("503", _("BKIAM 同步失败"))
BKIAM_POLICIES_COUNT_LIMIT_ERR = ("504", _("BKIAM 策略数上限溢出"))
# BaseModel 异常
CORE_BASE_MODEL_NO_PK = ("601", _("基础模型配置不存在PK"))
CORE_BASE_MODEL_INSTANCE_NOT_EXIT = ("602", _("基础模型实例不存在"))
# Others
PERMISSION_DENIED_ERR = ("705", _("角色访问权限不足"))
NO_FUNCTION_ERR = ("706", _("功能还未实现"))
APP_NOT_MATCH_ERR = ("707", _("非法APP发起访问,请检查 APP_CODE"))
OUTER_MODEL_ATTR_ERR = ("708", _("存在不符合预期的 Model 数据"))
PARAM_ERR = ("709", _("参数校验错误"))
REDIS_CONNECT_ERR = ("710", _("无法连接REDIS服务"), _("检查AuthAPI依赖redis服务是否正常"))
    OBJECT_SERIALIZER_NO_PK_ERR = ("711", _("ObjectClass 类型定义缺少对主键的定义"))
UPDATE_ROLE_ERR = ("712", _("更新角色成员列表错误"))
# ITSM
TICKET_CREATE_ERROR = ("801", _("当前有未完成的单据"))
CALL_BACK_ERROR = ("802", _("回调第三方模块发生错误"))
ITSM_CATALOGS_NOT_EXIST = ("803", _("未在itsm找到相关的目录"))
class PermissionDeniedError(AuthAPIError):
CODE = AuthCode.PERMISSION_DENIED_ERR[0]
MESSAGE = AuthCode.PERMISSION_DENIED_ERR[1]
class NoPermissionError(AuthAPIError):
CODE = AuthCode.NO_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_PERMISSION_ERR[1]
class TicketStateHasBeenOperatedError(AuthAPIError):
CODE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[0]
MESSAGE = AuthCode.TICKET_STATE_HAS_BEEN_OPERATED_ERR[1]
class ApprovalRangeError(AuthAPIError):
CODE = AuthCode.APPROVAL_RANGE_ERR[0]
MESSAGE = AuthCode.APPROVAL_RANGE_ERR[1]
class NotInApprovalProcessError(AuthAPIError):
CODE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[0]
MESSAGE = AuthCode.NOT_IN_APPROVAL_PROCESS_ERR[1]
class NoUpdatePermissionError(AuthAPIError):
CODE = AuthCode.NO_UPDATE_PERMISSION_ERR[0]
MESSAGE = AuthCode.NO_UPDATE_PERMISSION_ERR[1]
class ObjectClassError(AuthAPIError):
CODE = AuthCode.OBJECT_CLASS_ERR[0]
MESSAGE = AuthCode.OBJECT_CLASS_ERR[1]
class SubjectCheckErr(AuthAPIError):
CODE = AuthCode.SUBJECT_CHECK_ERR[0]
MESSAGE = AuthCode.SUBJECT_CHECK_ERR[1]
class UnexpectedTicketTypeErr(AuthAPIError):
CODE = AuthCode.UNEXPECTED_TICKET_TYPE[0]
MESSAGE = AuthCode.UNEXPECTED_TICKET_TYPE[1]
class ScopeCheckErr(AuthAPIError):
CODE = AuthCode.SCOPE_CHECK_ERR[0]
MESSAGE = AuthCode.SCOPE_CHECK_ERR[1]
class NoFunctionErr(AuthAPIError):
CODE = AuthCode.NO_FUNCTION_ERR[0]
MESSAGE = AuthCode.NO_FUNCTION_ERR[1]
class HasAlreadyExistsErr(AuthAPIError):
CODE = AuthCode.HAS_ALREADY_EXISTS_ERR[0]
MESSAGE = AuthCode.HAS_ALREADY_EXISTS_ERR[1]
class ObjectNotExistsErr(AuthAPIError):
CODE = AuthCode.OBJECT_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.OBJECT_NOT_EXIST_ERR[1]
class ActionCheckErr(AuthAPIError):
CODE = AuthCode.ACTION_CHECK_ERR[0]
MESSAGE = AuthCode.ACTION_CHECK_ERR[1]
class NoProcessorErr(AuthAPIError):
CODE = AuthCode.NO_PROCESSOR_ERR[0]
MESSAGE = AuthCode.NO_PROCESSOR_ERR[1]
class RepeatPermissionErr(AuthAPIError):
CODE = AuthCode.REPEAT_PERMISSION_INFO[0]
MESSAGE = AuthCode.REPEAT_PERMISSION_INFO[1]
class RoleAtLeastOneErr(AuthAPIError):
CODE = AuthCode.ROLE_AT_LEAST_ONE_ERR[0]
MESSAGE = AuthCode.ROLE_AT_LEAST_ONE_ERR[1]
class TokenNotExistErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.TOKEN_NOT_EXIST_ERR[1]
class TokenNotAuthorizedErr(AuthAPIError):
CODE = AuthCode.TOKEN_NOT_AUTHORIZED[0]
MESSAGE = AuthCode.TOKEN_NOT_AUTHORIZED[1]
class TokenExpiredErr(AuthAPIError):
CODE = AuthCode.TOKEN_EXPIRED_ERR[0]
MESSAGE = AuthCode.TOKEN_EXPIRED_ERR[1]
class DataScopeFormatErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_FORMAT_ERR[0]
MESSAGE = AuthCode.DATA_SCOPE_FORMAT_ERR[1 | MESSAGE = AuthCode.DATA_SCOPE_VALID_ERR[1]
class AppNotMatchErr(AuthAPIError):
CODE = AuthCode.APP_NOT_MATCH_ERR[0]
MESSAGE = AuthCode.APP_NOT_MATCH_ERR[1]
class PermissionObjectDoseNotExistError(AuthAPIError):
CODE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[0]
MESSAGE = AuthCode.PERMISSION_OBJECT_DOSE_NOT_EXIST_ERR[1]
class TokenDisabledErr(AuthAPIError):
CODE = AuthCode.TOKEN_DISABLED_ERR[0]
MESSAGE = AuthCode.TOKEN_DISABLED_ERR[1]
class OuterModelAttrErr(AuthAPIError):
CODE = AuthCode.OUTER_MODEL_ATTR_ERR[0]
MESSAGE = AuthCode.OUTER_MODEL_ATTR_ERR[1]
class ProjectDataTagValidErr(AuthAPIError):
CODE = AuthCode.PROJECT_DATA_VALID_ERR[0]
MESSAGE = AuthCode.PROJECT_DATA_VALID_ERR[1]
class NoMatchedTicketTypeProcessErr(AuthAPIError):
CODE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[0]
MESSAGE = AuthCode.NO_MATCHED_TICKET_TYPE_PROCESS_ERR[1]
class TicketCallbackErr(AuthAPIError):
CODE = AuthCode.TICKET_CALLBACK_ERR[0]
MESSAGE = AuthCode.TICKET_CALLBACK_ERR[1]
class NoticeApproveCallbackErr(AuthAPIError):
CODE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[0]
MESSAGE = AuthCode.NOTICE_APPROVE_CALLBACK_ERR[1]
class NotExistErr(AuthAPIError):
CODE = AuthCode.NOT_EXIST_ERR[0]
MESSAGE = AuthCode.NOT_EXIST_ERR[1]
class NotObjectClassSameWithMeta(AuthAPIError):
CODE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[0]
MESSAGE = AuthCode.NOT_OBJECT_CLASS_SAME_WITH_META[1]
class ParameterErr(AuthAPIError):
CODE = AuthCode.PARAM_ERR[0]
MESSAGE = AuthCode.PARAM_ERR[1]
class RedisConnectError(AuthAPIError):
CODE = AuthCode.REDIS_CONNECT_ERR[0]
MESSAGE = AuthCode.REDIS_CONNECT_ERR[1]
class ObjectSerilizerNoPKErr(AuthAPIError):
CODE = AuthCode.OBJECT_SERIALIZER_NO_PK_ERR[0]
MESSAGE = AuthCode.OBJECT_SERIALIZER_NO_PK_ERR[1]
class UpdateRoleErr(AuthAPIError):
CODE = AuthCode.UPDATE_ROLE_ERR[0]
MESSAGE = AuthCode.UPDATE_ROLE_ERR[1]
class QueryTooManyResourceErr(AuthAPIError):
CODE = AuthCode.QUERY_TOO_MANY_RESOURCE_ERR[0]
MESSAGE = AuthCode.QUERY_TOO_MANY_RESOURCE_ERR[1]
class InvalidParentResourceTypeErr(AuthAPIError):
CODE = AuthCode.INVALID_PARENT_RESOURCE_TYPE_ERR[0]
MESSAGE = AuthCode.INVALID_PARENT_RESOURCE_TYPE_ERR[1]
class InvalidResourceAttrErr(AuthAPIError):
CODE = AuthCode.INVALID_RESOURCE_ATTR_ERR[0]
MESSAGE = AuthCode.INVALID_RESOURCE_ATTR_ERR[1]
class FilterNotToScopeErr(AuthAPIError):
CODE = AuthCode.FILTER_NOT_TO_SCOPE_ERR[0]
MESSAGE = AuthCode.FILTER_NOT_TO_SCOPE_ERR[1]
class BKIAMNotSupportAttrAutho(AuthAPIError):
CODE = AuthCode.BKIAM_NOT_SUPPOR_ATTR_AUTHO[0]
MESSAGE = AuthCode.BKIAM_NOT_SUPPOR_ATTR_AUTHO[1]
class BKIAMConfigureRestrictionErr(AuthAPIError):
CODE = AuthCode.BKIAM_CONFIGURE_RESTRICTION[0]
MESSAGE = AuthCode.BKIAM_CONFIGURE_RESTRICTION[1]
class BKIAMSyncAuthoErr(AuthAPIError):
CODE = AuthCode.BKIAM_SYNC_AUTHO_ERR[0]
MESSAGE = AuthCode.BKIAM_SYNC_AUTHO_ERR[1]
class BKIAMPolicesCountLimitErr(AuthAPIError):
CODE = AuthCode.BKIAM_POLICIES_COUNT_LIMIT_ERR[0]
MESSAGE = AuthCode.BKIAM_POLICIES_COUNT_LIMIT_ERR[1]
class CoreBaseModelNoPK(AuthAPIError):
CODE = AuthCode.CORE_BASE_MODEL_NO_PK[0]
MESSAGE = AuthCode.CORE_BASE_MODEL_NO_PK[1]
class CoreBaseModelInstanceNotExist(AuthAPIError):
CODE = AuthCode.CORE_BASE_MODEL_INSTANCE_NOT_EXIT[0]
MESSAGE = AuthCode.CORE_BASE_MODEL_INSTANCE_NOT_EXIT[1]
class TicketCreateError(AuthAPIError):
CODE = AuthCode.TICKET_CREATE_ERROR[0]
MESSAGE = AuthCode.TICKET_CREATE_ERROR[1]
class CallBackError(AuthAPIError):
CODE = AuthCode.CALL_BACK_ERROR[0]
MESSAGE = AuthCode.CALL_BACK_ERROR[1]
class ItsmCatalogsNotExist(AuthAPIError):
CODE = AuthCode.ITSM_CATALOGS_NOT_EXIST[0]
MESSAGE = AuthCode.ITSM_CATALOGS_NOT_EXIST[1]
| ]
class DataScopeValidErr(AuthAPIError):
CODE = AuthCode.DATA_SCOPE_VALID_ERR[0]
| identifier_body |
main.rs | // required for pest
#![recursion_limit="128"]
mod config;
mod keys;
mod metadata;
mod remote;
mod util;
mod history;
mod chunking;
extern crate ring;
extern crate untrusted;
#[macro_use]
extern crate pest;
#[macro_use]
extern crate clap;
extern crate url;
use url::Url;
use std::io::Write;
use std::error::Error;
use std::fs;
use std::path::{Path,PathBuf};
use metadata::MetaObject;
use history::Restorable;
macro_rules! err_write {
($s: tt) => {
writeln!(std::io::stderr(), $s).ok().unwrap_or(())};
($s: tt, $($e: expr),*) => {
writeln!(std::io::stderr(), $s, $($e,)*).ok().unwrap_or(())}
}
#[allow(dead_code)]
struct GlobalOptions {
data_dir: PathBuf,
keystore: keys::Keystore,
cfg: config::Config,
verbose: bool,
quiet: bool
}
fn fail_error<E: Error>(msg: &str, err: E) {
writeln!(std::io::stderr(), "bkp: {}: {}", msg, err).unwrap();
std::process::exit(1);
}
trait UnwrapOrFail<T> {
/// Unwrap the result or fail with the given error message
fn unwrap_or_fail(self, msg: &str) -> T;
}
impl<T, E: Error> UnwrapOrFail<T> for Result<T, E> {
fn unwrap_or_fail(self, msg: &str) -> T {
match self {
Err(e) => {
fail_error(msg, e);
unreachable!()
},
Ok(x) => x
}
}
}
fn connect_backend(name: String, opts: &GlobalOptions)
-> Result<Box<remote::Backend>, remote::BackendError> {
use remote::BackendError;
if let Some(t) = opts.cfg.find_target(&name) {
remote::connect_tgt(t, &opts.cfg.node_name, &opts.keystore)
} else if let Some(g) = opts.cfg.find_group(&name) {
// bind names to actual targets
let tgts = g.members.iter()
.map(|ref n| opts.cfg.find_target(&n)
.ok_or(BackendError::InvalidOption))
.collect::<Result<Vec<&config::BackupTarget>, BackendError>>()?;
// connect all of them
remote::connect_group(tgts, &opts.cfg.node_name, &opts.keystore)
} else {
Err(BackendError::InvalidOption)
}
}
fn do_dest(args: &clap::ArgMatches, opts: &mut GlobalOptions) {
match args.subcommand() {
("add", Some(m)) => { // add a destination
let name = m.value_of("name").unwrap();
let url = m.value_of("url").unwrap();
let user = m.value_of("user");
let password = m.value_of("password");
// make sure the specified destination doesn't already exist
if opts.cfg.targets.iter().any(|t| {t.name == name}) {
err_write!("bkp: Destination '{}' already exists", name);
std::process::exit(1);
}
// parse the target URL
let url = Url::parse(&url)
.unwrap_or_fail("Cannot parse given URL");
// build the new target
let tgt = config::BackupTarget {
name: name.to_owned(),
url: url,
user: user.map(String::from),
password: password.map(String::from),
key_file: None,
options: config::TargetOptions {
reliable: true,
upload_cost: 1,
download_cost: 1
}
};
opts.cfg.targets.push(tgt);
opts.cfg.save().unwrap_or_fail("Failed to save config file");
},
(s, _) if (s == "list") || s.is_empty() => { // list destinations
let max_left_col = opts.cfg.targets.iter()
.map(|ref x| x.name.len())
.max().unwrap_or(0);
for t in opts.cfg.targets.iter() {
println!("{1:0$} {2}", max_left_col, t.name, t.url.as_str());
}
},
("remove", Some(m)) => { // remove destinations
unimplemented!()
},
("test", Some(m)) => { // test destination connectivity
let mut has_errs = false;
let max_col = m.values_of("name").unwrap()
.map(|ref x| x.len()).max().unwrap_or(0);
for name in m.values_of("name").unwrap() {
let tgt = connect_backend(name.to_owned(), &opts);
match tgt {
Ok(_) => println!("{1:0$}: successful", max_col, name),
Err(e) => {
println!("{1:0$}: {2}", max_col, name, e);
has_errs = true;
}
}
}
if has_errs {
std::process::exit(1);
}
},
(_, _) => panic!("No subcommand handler found")
}
}
fn do_test(args: &clap::ArgMatches, opts: &GlobalOptions) {
let profile = match args.value_of("profile").unwrap() {
"quick" => history::IntegrityTestMode::Quick,
"normal" => history::IntegrityTestMode::Normal,
"slow" => history::IntegrityTestMode::Slow,
"exhaustive" => history::IntegrityTestMode::Exhaustive,
_ => panic!("unexpected test mode string")
};
let names = opts.cfg.targets.iter().map(|x| {x.name.clone()})
.chain(opts.cfg.target_groups.iter().map(|x| {x.name.clone()}));
for t in names {
let b = connect_backend(t.clone(), opts);
if let Err(e) = b {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// construct a history object
let mut b = b.unwrap();
let hist = history::History::new(&mut b);
if let Err(e) = hist {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// run the check
match hist.unwrap().check(profile) {
Err(e) => {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
},
Ok(true) => println!("{}: okay", t),
Ok(false) => println!("{}: failed", t),
}
}
}
fn | (args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_clean(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_snap(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
let snap_paths: Vec<&str> = args.values_of("local").unwrap().collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
// construct a history object
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// update paths
let new_tree = history.update_paths(snap_paths)
.unwrap_or_fail("failed to write modified trees");
// build a new snapshot
let snap = history.new_snapshot(new_tree)
.unwrap_or_fail("failed to create snapshot");
println!("snapshot created.");
}
fn do_restore(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
// TODO: avoid specifying remote by searching for all remotes with a file
let objects: Vec<&Path> = args.values_of("local").unwrap()
.map(Path::new).collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// TODO: figure out the target time, if any
// find the requested snapshot
// TODO: add command for recovering backups with broken head snapshot
let mut snapshot = history.get_snapshot()
.unwrap_or_fail("failed to read root snapshot");
if snapshot.is_none() {
eprintln!("bkp: cannot restore from empty target");
std::process::exit(1);
}
let snapshot = loop {
match snapshot {
None => {
eprintln!("bkp: no matching snapshot");
// TODO: show most recent one?
std::process::exit(1);
},
Some(snap) => {
// TODO: Add target time check here
if true {
break snap;
}
snapshot = snap.parent()
.unwrap_or_fail("failed to read snapshot");
}
}
};
// retrieve the objects we're interested in
let objects: history::Result<Vec<_>> = objects.into_iter()
.map(|obj| snapshot.get(&obj).map(|r| (obj, r)))
.collect();
let objects = objects.unwrap_or_fail("cannot read stored objects");
// warn about missing files, if any
if objects.iter().any(|x| x.1.is_none()) {
println!("The following paths could not be found:");
for p in objects.iter().filter(|x| x.1.is_none()) {
println!("\t{}", p.0.to_str().unwrap_or("<unprintable path>"));
}
println!("");
use std::ascii::AsciiExt;
let abort = loop {
print!("Do you want to continue restoring? (y/n) ");
std::io::stdout().flush().unwrap();
let mut response = String::new();
std::io::stdin().read_line(&mut response).unwrap();
match response.chars().next().map(|x| x.to_ascii_lowercase()) {
Some('y') => break false, // no abort
Some('n') => break true, // abort
_ => {}, // ask again
}
};
if abort {
println!("aborted");
return;
}
}
let objects: Vec<_> = objects.into_iter()
.filter_map(|(p,o)| o.map(|v| (p, v)))
.collect();
// actually reconstruct them
let base_path = Path::new(args.value_of("into").unwrap_or("/"));
let options = history::RestoreOptions::new()
.overwrite(args.is_present("overwrite"))
.ignore_permissions(args.is_present("no_perms"));
for (path, obj) in objects {
match obj.restore(&base_path, &options) {
Ok(()) => {},
Err(history::Error::InvalidArgument) => {
eprintln!("bkp: possible integrity violation found!");
eprintln!(" invalid object type at path: {}",
path.to_str().unwrap_or("<unprintable>"));
},
Err(e) => fail_error("cannot restore object", e)
}
}
}
fn load_config(pth: &Path) -> config::Config {
let cfg = config::Config::load(&pth);
if let Err(e) = cfg {
if let config::ConfigErr::IOError(ref err) = e {
if err.kind() == std::io::ErrorKind::NotFound {
err_write!("Creating new configuration file");
// try to create a new config
let cfg = config::Config::default();
cfg.save().ok().unwrap_or(());
return cfg
}
}
let errstr = match e {
config::ConfigErr::ParseError(x) => x,
config::ConfigErr::IOError(x) => String::from(x.description())
};
writeln!(std::io::stderr(),
"bkp: Cannot load config file: {}", errstr).unwrap();
std::process::exit(1);
}
return cfg.unwrap();
}
fn main() {
let opt_matches = clap_app!(bkp =>
(version: "0.1")
(author: "Noah Zentzis <[email protected]>")
(about: "Automated system backup utility")
(@arg CONFIG: -c --config +takes_value "Specifies a config file to use")
        (@arg DATADIR: -D --("data-dir") +takes_value "Specify the local data path")
(@arg BACKEND: -t --target +takes_value
"Override the default destination")
(@arg VERBOSE: -v --verbose "Enable verbose terminal output")
(@arg QUIET: -q --quiet "Silence non-error terminal output")
(@subcommand dest =>
(about: "Query and modify available backup destinations")
(@subcommand add =>
(about: "Create a new destination")
(@arg name: +required "The name of the new destination")
(@arg url: +required {|s| {Url::parse(&s).map(|_| ())
.map_err(|_| String::from("Not a valid URL"))}}
"The new destination's URL" )
(@arg user: -u --user +takes_value "Set the associated username")
(@arg password: -p --password +takes_value
"Set the associated password"))
(@subcommand list =>
(about: "List the available destinations")
(@arg no_groups: -n --("no-groups")
"Don't show grouped destinations"))
(@subcommand remove =>
(about: "Remove an existing destination")
(@arg name: +required "The destination name to remove")
(@arg scrub: -S --scrub "Remove existing backups from the target"))
(@subcommand test =>
(about: "Test connectivity to a destination")
            (@arg name: +required ... "The destination to test")))
(@subcommand test =>
(about: "Test integrity of existing backups")
(@arg profile: +takes_value
possible_values(&["quick", "normal", "slow", "exhaustive"])
default_value("normal")
"The test profile to run")
(@arg all: -a --all
"Test backups from all machines rather than just this one"))
(@subcommand stat =>
(about: "Show backup statistics")
(@arg dest: +takes_value ...
"Only show data about the given destinations")
(@arg remote: -r --remote
"Query remote servers, bypassing local caches"))
(@subcommand clean =>
(about: "Remove backup data matching specific criteria. \
All given predicates must match in order for data to be removed.")
(@arg dest: +takes_value ...
"Only remove data from the given destinations")
(@arg dry_run: -n --("dry-run")
"Don't remove anything, just show what would be done")
(@group predicates =>
(@attributes +multiple +required)
(@arg snap_type: -t --type +takes_value
possible_values(&["diff", "full"])
"Match data in snapshots with type")
(@arg older_than: -o --("older-than") +takes_value
"Match data older than a certain age")
(@arg newer_than: -N --("newer-than") +takes_value
"Match data newer than a certain age")
(@arg exists: -e --exists +takes_value
possible_values(&["yes", "no"])
"Match data based on whether it exists on the host")))
(@subcommand snap =>
(about: "Take a snapshot of local files")
(@arg remote: +takes_value "Remote to store data in")
(@arg local: +takes_value ... "Files or directories to snapshot")
(@arg no_trust_mtime: -T --("no-trust-mtime")
"Use content hashes to check for file changes rather than FS's mtime"))
(@subcommand restore =>
(about: "Restore local files from backup")
(@arg remote: +required "Remote to restore from")
(@arg local: ... min_values(1) "Files or directories to restore")
(@arg as_of: -t --time +takes_value
"Restore to most recent snapshot before given date/time")
(@arg overwrite: -o --overwrite "Overwrite existing local files")
(@arg from: -f --from +takes_value "Restore data from another machine")
(@arg no_perms: -p --("no-perms")
"Don't restore filesystem permissions")
(@arg no_attrs: -a --("no-attrs") "Don't restore file metadata")
(@arg into: -i --into conflicts_with[overwrite] +takes_value
"Restore to a given path")
)
).get_matches();
// load a config file
let config_path = opt_matches
.value_of("CONFIG")
.map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkprc"));
let cfg = load_config(&config_path);
// create the data dir if needed
let data_dir = opt_matches.value_of("DATADIR").map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkp"));
if let Err(e) = fs::metadata(&data_dir) {
if e.kind() == std::io::ErrorKind::NotFound {
if fs::create_dir(&data_dir).is_err() {
writeln!(std::io::stderr(), "bkp: Cannot create directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
}
// open the key store
let kspath = data_dir.join("keystore");
let ks = match fs::metadata(&kspath) {
Ok(_) => match keys::Keystore::open(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot open keystore: {}", e.description());
std::process::exit(1);
}
},
Err(e) => if e.kind() == std::io::ErrorKind::NotFound {
match keys::Keystore::create(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot create keystore: {}", e.description());
std::process::exit(1);
}
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access keystore: {}",
kspath.display()).unwrap();
std::process::exit(1);
}
};
// parse global flags
let mut global_flags = GlobalOptions {
cfg: cfg,
verbose: opt_matches.is_present("VERBOSE"),
quiet: opt_matches.is_present("QUIET"),
data_dir: data_dir,
keystore: ks
};
// figure out what to do
match opt_matches.subcommand() {
("", _) => { println!("bkp: No subcommand specified"); },
("dest", Some(m)) => do_dest(m, &mut global_flags),
("test", Some(m)) => do_test(m, &global_flags),
("stat", Some(m)) => do_stat(m, &global_flags),
("clean", Some(m)) => do_clean(m, &global_flags),
("snap", Some(m)) => do_snap(m, &global_flags),
("restore", Some(m)) => do_restore(m, &global_flags),
(_, _) => panic!("No subcommand handler found!")
}
}
| do_stat | identifier_name |
main.rs | // required for pest
#![recursion_limit="128"]
mod config;
mod keys;
mod metadata;
mod remote;
mod util;
mod history;
mod chunking;
extern crate ring;
extern crate untrusted;
#[macro_use]
extern crate pest;
#[macro_use]
extern crate clap;
extern crate url;
use url::Url;
use std::io::Write;
use std::error::Error;
use std::fs;
use std::path::{Path,PathBuf};
use metadata::MetaObject;
use history::Restorable;
macro_rules! err_write {
($s: tt) => {
writeln!(std::io::stderr(), $s).ok().unwrap_or(())};
($s: tt, $($e: expr),*) => {
writeln!(std::io::stderr(), $s, $($e,)*).ok().unwrap_or(())}
}
#[allow(dead_code)]
struct GlobalOptions {
data_dir: PathBuf,
keystore: keys::Keystore,
cfg: config::Config,
verbose: bool,
quiet: bool
}
fn fail_error<E: Error>(msg: &str, err: E) {
writeln!(std::io::stderr(), "bkp: {}: {}", msg, err).unwrap();
std::process::exit(1);
}
trait UnwrapOrFail<T> {
/// Unwrap the result or fail with the given error message
fn unwrap_or_fail(self, msg: &str) -> T;
}
impl<T, E: Error> UnwrapOrFail<T> for Result<T, E> {
fn unwrap_or_fail(self, msg: &str) -> T {
match self {
Err(e) => {
fail_error(msg, e);
unreachable!()
},
Ok(x) => x
}
}
}
fn connect_backend(name: String, opts: &GlobalOptions)
-> Result<Box<remote::Backend>, remote::BackendError> {
use remote::BackendError;
if let Some(t) = opts.cfg.find_target(&name) {
remote::connect_tgt(t, &opts.cfg.node_name, &opts.keystore)
} else if let Some(g) = opts.cfg.find_group(&name) {
// bind names to actual targets
let tgts = g.members.iter()
.map(|ref n| opts.cfg.find_target(&n)
.ok_or(BackendError::InvalidOption))
.collect::<Result<Vec<&config::BackupTarget>, BackendError>>()?;
// connect all of them
remote::connect_group(tgts, &opts.cfg.node_name, &opts.keystore)
} else {
Err(BackendError::InvalidOption)
}
}
fn do_dest(args: &clap::ArgMatches, opts: &mut GlobalOptions) {
match args.subcommand() {
("add", Some(m)) => { // add a destination
let name = m.value_of("name").unwrap();
let url = m.value_of("url").unwrap();
let user = m.value_of("user");
let password = m.value_of("password");
// make sure the specified destination doesn't already exist
if opts.cfg.targets.iter().any(|t| {t.name == name}) {
err_write!("bkp: Destination '{}' already exists", name);
std::process::exit(1);
}
// parse the target URL
let url = Url::parse(&url)
.unwrap_or_fail("Cannot parse given URL");
// build the new target
let tgt = config::BackupTarget {
name: name.to_owned(),
url: url,
user: user.map(String::from),
password: password.map(String::from),
key_file: None,
options: config::TargetOptions {
reliable: true,
upload_cost: 1,
download_cost: 1
}
};
opts.cfg.targets.push(tgt);
opts.cfg.save().unwrap_or_fail("Failed to save config file");
},
(s, _) if (s == "list") || s.is_empty() => { // list destinations
let max_left_col = opts.cfg.targets.iter()
.map(|ref x| x.name.len())
.max().unwrap_or(0);
for t in opts.cfg.targets.iter() {
println!("{1:0$} {2}", max_left_col, t.name, t.url.as_str());
}
},
("remove", Some(m)) => { // remove destinations
unimplemented!()
},
("test", Some(m)) => { // test destination connectivity
let mut has_errs = false;
let max_col = m.values_of("name").unwrap()
.map(|ref x| x.len()).max().unwrap_or(0);
for name in m.values_of("name").unwrap() {
let tgt = connect_backend(name.to_owned(), &opts);
match tgt {
Ok(_) => println!("{1:0$}: successful", max_col, name),
Err(e) => {
println!("{1:0$}: {2}", max_col, name, e);
has_errs = true;
}
}
}
if has_errs {
std::process::exit(1);
}
},
(_, _) => panic!("No subcommand handler found")
}
}
fn do_test(args: &clap::ArgMatches, opts: &GlobalOptions) {
let profile = match args.value_of("profile").unwrap() {
"quick" => history::IntegrityTestMode::Quick,
"normal" => history::IntegrityTestMode::Normal,
"slow" => history::IntegrityTestMode::Slow,
"exhaustive" => history::IntegrityTestMode::Exhaustive,
_ => panic!("unexpected test mode string")
};
let names = opts.cfg.targets.iter().map(|x| {x.name.clone()})
.chain(opts.cfg.target_groups.iter().map(|x| {x.name.clone()}));
for t in names {
let b = connect_backend(t.clone(), opts);
if let Err(e) = b {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// construct a history object
let mut b = b.unwrap();
let hist = history::History::new(&mut b);
if let Err(e) = hist {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// run the check
match hist.unwrap().check(profile) {
Err(e) => {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
},
Ok(true) => println!("{}: okay", t),
Ok(false) => println!("{}: failed", t),
}
}
}
fn do_stat(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_clean(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_snap(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
let snap_paths: Vec<&str> = args.values_of("local").unwrap().collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
// construct a history object
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// update paths
let new_tree = history.update_paths(snap_paths)
.unwrap_or_fail("failed to write modified trees");
// build a new snapshot
let snap = history.new_snapshot(new_tree)
.unwrap_or_fail("failed to create snapshot");
println!("snapshot created.");
}
fn do_restore(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
// TODO: avoid specifying remote by searching for all remotes with a file
let objects: Vec<&Path> = args.values_of("local").unwrap()
.map(Path::new).collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// TODO: figure out the target time, if any
// find the requested snapshot
// TODO: add command for recovering backups with broken head snapshot
let mut snapshot = history.get_snapshot()
.unwrap_or_fail("failed to read root snapshot");
if snapshot.is_none() {
eprintln!("bkp: cannot restore from empty target");
std::process::exit(1);
}
let snapshot = loop {
match snapshot {
None => {
eprintln!("bkp: no matching snapshot");
// TODO: show most recent one?
std::process::exit(1);
},
Some(snap) => {
// TODO: Add target time check here
if true {
break snap;
}
snapshot = snap.parent()
.unwrap_or_fail("failed to read snapshot");
}
}
};
// retrieve the objects we're interested in
let objects: history::Result<Vec<_>> = objects.into_iter()
.map(|obj| snapshot.get(&obj).map(|r| (obj, r)))
.collect();
let objects = objects.unwrap_or_fail("cannot read stored objects");
// warn about missing files, if any
if objects.iter().any(|x| x.1.is_none()) {
println!("The following paths could not be found:");
for p in objects.iter().filter(|x| x.1.is_none()) {
println!("\t{}", p.0.to_str().unwrap_or("<unprintable path>"));
}
println!("");
use std::ascii::AsciiExt;
let abort = loop {
print!("Do you want to continue restoring? (y/n) ");
std::io::stdout().flush().unwrap();
let mut response = String::new();
std::io::stdin().read_line(&mut response).unwrap();
match response.chars().next().map(|x| x.to_ascii_lowercase()) {
Some('y') => break false, // no abort
Some('n') => break true, // abort
_ => {}, // ask again
}
};
if abort {
println!("aborted");
return;
}
}
let objects: Vec<_> = objects.into_iter()
.filter_map(|(p,o)| o.map(|v| (p, v)))
.collect();
// actually reconstruct them
let base_path = Path::new(args.value_of("into").unwrap_or("/"));
let options = history::RestoreOptions::new()
.overwrite(args.is_present("overwrite"))
.ignore_permissions(args.is_present("no_perms"));
for (path, obj) in objects {
match obj.restore(&base_path, &options) {
Ok(()) => {},
Err(history::Error::InvalidArgument) => {
eprintln!("bkp: possible integrity violation found!");
eprintln!(" invalid object type at path: {}",
path.to_str().unwrap_or("<unprintable>"));
},
Err(e) => fail_error("cannot restore object", e)
}
}
}
fn load_config(pth: &Path) -> config::Config {
let cfg = config::Config::load(&pth);
if let Err(e) = cfg {
if let config::ConfigErr::IOError(ref err) = e {
if err.kind() == std::io::ErrorKind::NotFound {
err_write!("Creating new configuration file");
// try to create a new config
let cfg = config::Config::default();
cfg.save().ok().unwrap_or(());
return cfg
}
}
let errstr = match e {
config::ConfigErr::ParseError(x) => x,
config::ConfigErr::IOError(x) => String::from(x.description())
};
writeln!(std::io::stderr(),
"bkp: Cannot load config file: {}", errstr).unwrap();
std::process::exit(1);
}
return cfg.unwrap();
}
fn main() {
let opt_matches = clap_app!(bkp =>
(version: "0.1")
(author: "Noah Zentzis <[email protected]>")
(about: "Automated system backup utility")
(@arg CONFIG: -c --config +takes_value "Specifies a config file to use")
        (@arg DATADIR: -D --("data-dir") +takes_value "Specify the local data path")
(@arg BACKEND: -t --target +takes_value
"Override the default destination")
(@arg VERBOSE: -v --verbose "Enable verbose terminal output")
(@arg QUIET: -q --quiet "Silence non-error terminal output")
(@subcommand dest =>
(about: "Query and modify available backup destinations")
(@subcommand add =>
(about: "Create a new destination")
(@arg name: +required "The name of the new destination")
(@arg url: +required {|s| {Url::parse(&s).map(|_| ())
.map_err(|_| String::from("Not a valid URL"))}}
"The new destination's URL" )
(@arg user: -u --user +takes_value "Set the associated username")
(@arg password: -p --password +takes_value
"Set the associated password"))
(@subcommand list =>
(about: "List the available destinations")
(@arg no_groups: -n --("no-groups")
"Don't show grouped destinations"))
(@subcommand remove =>
(about: "Remove an existing destination")
(@arg name: +required "The destination name to remove")
(@arg scrub: -S --scrub "Remove existing backups from the target"))
(@subcommand test =>
(about: "Test connectivity to a destination")
            (@arg name: +required ... "The destination to test")))
(@subcommand test =>
(about: "Test integrity of existing backups")
(@arg profile: +takes_value
possible_values(&["quick", "normal", "slow", "exhaustive"])
default_value("normal")
"The test profile to run")
(@arg all: -a --all
"Test backups from all machines rather than just this one"))
(@subcommand stat =>
(about: "Show backup statistics")
(@arg dest: +takes_value ...
"Only show data about the given destinations")
(@arg remote: -r --remote
"Query remote servers, bypassing local caches"))
(@subcommand clean =>
(about: "Remove backup data matching specific criteria. \
All given predicates must match in order for data to be removed.")
(@arg dest: +takes_value ...
"Only remove data from the given destinations")
(@arg dry_run: -n --("dry-run")
"Don't remove anything, just show what would be done")
(@group predicates =>
(@attributes +multiple +required)
(@arg snap_type: -t --type +takes_value
possible_values(&["diff", "full"])
"Match data in snapshots with type")
(@arg older_than: -o --("older-than") +takes_value
"Match data older than a certain age")
(@arg newer_than: -N --("newer-than") +takes_value
"Match data newer than a certain age")
(@arg exists: -e --exists +takes_value
possible_values(&["yes", "no"])
"Match data based on whether it exists on the host")))
(@subcommand snap =>
(about: "Take a snapshot of local files")
(@arg remote: +takes_value "Remote to store data in")
(@arg local: +takes_value ... "Files or directories to snapshot")
(@arg no_trust_mtime: -T --("no-trust-mtime")
"Use content hashes to check for file changes rather than FS's mtime"))
(@subcommand restore =>
(about: "Restore local files from backup")
(@arg remote: +required "Remote to restore from")
(@arg local: ... min_values(1) "Files or directories to restore")
(@arg as_of: -t --time +takes_value
"Restore to most recent snapshot before given date/time")
(@arg overwrite: -o --overwrite "Overwrite existing local files")
(@arg from: -f --from +takes_value "Restore data from another machine")
(@arg no_perms: -p --("no-perms")
"Don't restore filesystem permissions")
(@arg no_attrs: -a --("no-attrs") "Don't restore file metadata")
(@arg into: -i --into conflicts_with[overwrite] +takes_value
"Restore to a given path")
)
).get_matches();
// load a config file
let config_path = opt_matches
.value_of("CONFIG")
.map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkprc"));
let cfg = load_config(&config_path);
// create the data dir if needed
let data_dir = opt_matches.value_of("DATADIR").map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkp"));
if let Err(e) = fs::metadata(&data_dir) {
if e.kind() == std::io::ErrorKind::NotFound {
if fs::create_dir(&data_dir).is_err() {
writeln!(std::io::stderr(), "bkp: Cannot create directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
}
// open the key store
let kspath = data_dir.join("keystore");
let ks = match fs::metadata(&kspath) {
Ok(_) => match keys::Keystore::open(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot open keystore: {}", e.description());
std::process::exit(1);
}
},
Err(e) => if e.kind() == std::io::ErrorKind::NotFound {
match keys::Keystore::create(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot create keystore: {}", e.description());
std::process::exit(1);
}
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access keystore: {}",
kspath.display()).unwrap();
std::process::exit(1);
}
};
// parse global flags
let mut global_flags = GlobalOptions {
cfg: cfg,
verbose: opt_matches.is_present("VERBOSE"),
quiet: opt_matches.is_present("QUIET"),
data_dir: data_dir,
keystore: ks
};
// figure out what to do
match opt_matches.subcommand() {
("", _) => { println!("bkp: No subcommand specified"); },
("dest", Some(m)) => do_dest(m, &mut global_flags),
("test", Some(m)) => do_test(m, &global_flags),
("stat", Some(m)) => do_stat(m, &global_flags),
("clean", Some(m)) => do_clean(m, &global_flags),
("snap", Some(m)) => do_snap(m, &global_flags), | ("restore", Some(m)) => do_restore(m, &global_flags),
(_, _) => panic!("No subcommand handler found!")
}
} | random_line_split |
|
main.rs | // required for pest
#![recursion_limit="128"]
mod config;
mod keys;
mod metadata;
mod remote;
mod util;
mod history;
mod chunking;
extern crate ring;
extern crate untrusted;
#[macro_use]
extern crate pest;
#[macro_use]
extern crate clap;
extern crate url;
use url::Url;
use std::io::Write;
use std::error::Error;
use std::fs;
use std::path::{Path,PathBuf};
use metadata::MetaObject;
use history::Restorable;
macro_rules! err_write {
($s: tt) => {
writeln!(std::io::stderr(), $s).ok().unwrap_or(())};
($s: tt, $($e: expr),*) => {
writeln!(std::io::stderr(), $s, $($e,)*).ok().unwrap_or(())}
}
#[allow(dead_code)]
struct GlobalOptions {
data_dir: PathBuf,
keystore: keys::Keystore,
cfg: config::Config,
verbose: bool,
quiet: bool
}
fn fail_error<E: Error>(msg: &str, err: E) {
writeln!(std::io::stderr(), "bkp: {}: {}", msg, err).unwrap();
std::process::exit(1);
}
trait UnwrapOrFail<T> {
/// Unwrap the result or fail with the given error message
fn unwrap_or_fail(self, msg: &str) -> T;
}
impl<T, E: Error> UnwrapOrFail<T> for Result<T, E> {
fn unwrap_or_fail(self, msg: &str) -> T {
match self {
Err(e) => {
fail_error(msg, e);
unreachable!()
},
Ok(x) => x
}
}
}
fn connect_backend(name: String, opts: &GlobalOptions)
-> Result<Box<remote::Backend>, remote::BackendError> {
use remote::BackendError;
if let Some(t) = opts.cfg.find_target(&name) {
remote::connect_tgt(t, &opts.cfg.node_name, &opts.keystore)
} else if let Some(g) = opts.cfg.find_group(&name) {
// bind names to actual targets
let tgts = g.members.iter()
.map(|ref n| opts.cfg.find_target(&n)
.ok_or(BackendError::InvalidOption))
.collect::<Result<Vec<&config::BackupTarget>, BackendError>>()?;
// connect all of them
remote::connect_group(tgts, &opts.cfg.node_name, &opts.keystore)
} else {
Err(BackendError::InvalidOption)
}
}
fn do_dest(args: &clap::ArgMatches, opts: &mut GlobalOptions) {
match args.subcommand() {
("add", Some(m)) => { // add a destination
let name = m.value_of("name").unwrap();
let url = m.value_of("url").unwrap();
let user = m.value_of("user");
let password = m.value_of("password");
// make sure the specified destination doesn't already exist
if opts.cfg.targets.iter().any(|t| {t.name == name}) {
err_write!("bkp: Destination '{}' already exists", name);
std::process::exit(1);
}
// parse the target URL
let url = Url::parse(&url)
.unwrap_or_fail("Cannot parse given URL");
// build the new target
let tgt = config::BackupTarget {
name: name.to_owned(),
url: url,
user: user.map(String::from),
password: password.map(String::from),
key_file: None,
options: config::TargetOptions {
reliable: true,
upload_cost: 1,
download_cost: 1
}
};
opts.cfg.targets.push(tgt);
opts.cfg.save().unwrap_or_fail("Failed to save config file");
},
(s, _) if (s == "list") || s.is_empty() => { // list destinations
let max_left_col = opts.cfg.targets.iter()
.map(|ref x| x.name.len())
.max().unwrap_or(0);
for t in opts.cfg.targets.iter() {
println!("{1:0$} {2}", max_left_col, t.name, t.url.as_str());
}
},
("remove", Some(m)) => { // remove destinations
unimplemented!()
},
("test", Some(m)) => { // test destination connectivity
let mut has_errs = false;
let max_col = m.values_of("name").unwrap()
.map(|ref x| x.len()).max().unwrap_or(0);
for name in m.values_of("name").unwrap() {
let tgt = connect_backend(name.to_owned(), &opts);
match tgt {
Ok(_) => println!("{1:0$}: successful", max_col, name),
Err(e) => {
println!("{1:0$}: {2}", max_col, name, e);
has_errs = true;
}
}
}
if has_errs {
std::process::exit(1);
}
},
(_, _) => panic!("No subcommand handler found")
}
}
fn do_test(args: &clap::ArgMatches, opts: &GlobalOptions) {
let profile = match args.value_of("profile").unwrap() {
"quick" => history::IntegrityTestMode::Quick,
"normal" => history::IntegrityTestMode::Normal,
"slow" => history::IntegrityTestMode::Slow,
"exhaustive" => history::IntegrityTestMode::Exhaustive,
_ => panic!("unexpected test mode string")
};
let names = opts.cfg.targets.iter().map(|x| {x.name.clone()})
.chain(opts.cfg.target_groups.iter().map(|x| {x.name.clone()}));
for t in names {
let b = connect_backend(t.clone(), opts);
if let Err(e) = b {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// construct a history object
let mut b = b.unwrap();
let hist = history::History::new(&mut b);
if let Err(e) = hist {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
}
// run the check
match hist.unwrap().check(profile) {
Err(e) => {
println!("bkp: skipping destination '{}': {}", t, e);
continue;
},
Ok(true) => println!("{}: okay", t),
Ok(false) => println!("{}: failed", t),
}
}
}
fn do_stat(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_clean(args: &clap::ArgMatches, opts: &GlobalOptions) {
unimplemented!()
}
fn do_snap(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
let snap_paths: Vec<&str> = args.values_of("local").unwrap().collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
// construct a history object
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// update paths
let new_tree = history.update_paths(snap_paths)
.unwrap_or_fail("failed to write modified trees");
// build a new snapshot
let snap = history.new_snapshot(new_tree)
.unwrap_or_fail("failed to create snapshot");
println!("snapshot created.");
}
fn do_restore(args: &clap::ArgMatches, opts: &GlobalOptions) {
let remote = args.value_of("remote").unwrap().to_owned();
// TODO: avoid specifying remote by searching for all remotes with a file
let objects: Vec<&Path> = args.values_of("local").unwrap()
.map(Path::new).collect();
let mut remote = connect_backend(remote, opts)
.unwrap_or_fail("backend connection failed");
let mut history = history::History::new(&mut remote)
.unwrap_or_fail("failed to configure history layer");
// TODO: figure out the target time, if any
// find the requested snapshot
// TODO: add command for recovering backups with broken head snapshot
let mut snapshot = history.get_snapshot()
.unwrap_or_fail("failed to read root snapshot");
if snapshot.is_none() {
eprintln!("bkp: cannot restore from empty target");
std::process::exit(1);
}
let snapshot = loop {
match snapshot {
None => {
eprintln!("bkp: no matching snapshot");
// TODO: show most recent one?
std::process::exit(1);
},
Some(snap) => {
// TODO: Add target time check here
if true {
break snap;
}
snapshot = snap.parent()
.unwrap_or_fail("failed to read snapshot");
}
}
};
// retrieve the objects we're interested in
let objects: history::Result<Vec<_>> = objects.into_iter()
.map(|obj| snapshot.get(&obj).map(|r| (obj, r)))
.collect();
let objects = objects.unwrap_or_fail("cannot read stored objects");
// warn about missing files, if any
if objects.iter().any(|x| x.1.is_none()) {
println!("The following paths could not be found:");
for p in objects.iter().filter(|x| x.1.is_none()) {
println!("\t{}", p.0.to_str().unwrap_or("<unprintable path>"));
}
println!("");
use std::ascii::AsciiExt;
let abort = loop {
print!("Do you want to continue restoring? (y/n) ");
std::io::stdout().flush().unwrap();
let mut response = String::new();
std::io::stdin().read_line(&mut response).unwrap();
match response.chars().next().map(|x| x.to_ascii_lowercase()) {
Some('y') => break false, // no abort
Some('n') => break true, // abort
_ => {}, // ask again
}
};
if abort {
println!("aborted");
return;
}
}
let objects: Vec<_> = objects.into_iter()
.filter_map(|(p,o)| o.map(|v| (p, v)))
.collect();
// actually reconstruct them
let base_path = Path::new(args.value_of("into").unwrap_or("/"));
let options = history::RestoreOptions::new()
.overwrite(args.is_present("overwrite"))
.ignore_permissions(args.is_present("no_perms"));
for (path, obj) in objects {
match obj.restore(&base_path, &options) {
Ok(()) => {},
Err(history::Error::InvalidArgument) => {
eprintln!("bkp: possible integrity violation found!");
eprintln!(" invalid object type at path: {}",
path.to_str().unwrap_or("<unprintable>"));
},
Err(e) => fail_error("cannot restore object", e)
}
}
}
fn load_config(pth: &Path) -> config::Config {
let cfg = config::Config::load(&pth);
if let Err(e) = cfg {
if let config::ConfigErr::IOError(ref err) = e {
if err.kind() == std::io::ErrorKind::NotFound {
err_write!("Creating new configuration file");
// try to create a new config
let cfg = config::Config::default();
cfg.save().ok().unwrap_or(());
return cfg
}
}
let errstr = match e {
config::ConfigErr::ParseError(x) => x,
config::ConfigErr::IOError(x) => String::from(x.description())
};
writeln!(std::io::stderr(),
"bkp: Cannot load config file: {}", errstr).unwrap();
std::process::exit(1);
}
return cfg.unwrap();
}
fn main() | {
let opt_matches = clap_app!(bkp =>
(version: "0.1")
(author: "Noah Zentzis <[email protected]>")
(about: "Automated system backup utility")
(@arg CONFIG: -c --config +takes_value "Specifies a config file to use")
        (@arg DATADIR: -D --("data-dir") +takes_value "Specify the local data path")
(@arg BACKEND: -t --target +takes_value
"Override the default destination")
(@arg VERBOSE: -v --verbose "Enable verbose terminal output")
(@arg QUIET: -q --quiet "Silence non-error terminal output")
(@subcommand dest =>
(about: "Query and modify available backup destinations")
(@subcommand add =>
(about: "Create a new destination")
(@arg name: +required "The name of the new destination")
(@arg url: +required {|s| {Url::parse(&s).map(|_| ())
.map_err(|_| String::from("Not a valid URL"))}}
"The new destination's URL" )
(@arg user: -u --user +takes_value "Set the associated username")
(@arg password: -p --password +takes_value
"Set the associated password"))
(@subcommand list =>
(about: "List the available destinations")
(@arg no_groups: -n --("no-groups")
"Don't show grouped destinations"))
(@subcommand remove =>
(about: "Remove an existing destination")
(@arg name: +required "The destination name to remove")
(@arg scrub: -S --scrub "Remove existing backups from the target"))
(@subcommand test =>
(about: "Test connectivity to a destination")
(@arg name: +required * "The destination to test")))
(@subcommand test =>
(about: "Test integrity of existing backups")
(@arg profile: +takes_value
possible_values(&["quick", "normal", "slow", "exhaustive"])
default_value("normal")
"The test profile to run")
(@arg all: -a --all
"Test backups from all machines rather than just this one"))
(@subcommand stat =>
(about: "Show backup statistics")
(@arg dest: +takes_value ...
"Only show data about the given destinations")
(@arg remote: -r --remote
"Query remote servers, bypassing local caches"))
(@subcommand clean =>
(about: "Remove backup data matching specific criteria. \
All given predicates must match in order for data to be removed.")
(@arg dest: +takes_value ...
"Only remove data from the given destinations")
(@arg dry_run: -n --("dry-run")
"Don't remove anything, just show what would be done")
(@group predicates =>
(@attributes +multiple +required)
(@arg snap_type: -t --type +takes_value
possible_values(&["diff", "full"])
"Match data in snapshots with type")
(@arg older_than: -o --("older-than") +takes_value
"Match data older than a certain age")
(@arg newer_than: -N --("newer-than") +takes_value
"Match data newer than a certain age")
(@arg exists: -e --exists +takes_value
possible_values(&["yes", "no"])
"Match data based on whether it exists on the host")))
(@subcommand snap =>
(about: "Take a snapshot of local files")
(@arg remote: +takes_value "Remote to store data in")
(@arg local: +takes_value ... "Files or directories to snapshot")
(@arg no_trust_mtime: -T --("no-trust-mtime")
"Use content hashes to check for file changes rather than FS's mtime"))
(@subcommand restore =>
(about: "Restore local files from backup")
(@arg remote: +required "Remote to restore from")
(@arg local: ... min_values(1) "Files or directories to restore")
(@arg as_of: -t --time +takes_value
"Restore to most recent snapshot before given date/time")
(@arg overwrite: -o --overwrite "Overwrite existing local files")
(@arg from: -f --from +takes_value "Restore data from another machine")
(@arg no_perms: -p --("no-perms")
"Don't restore filesystem permissions")
(@arg no_attrs: -a --("no-attrs") "Don't restore file metadata")
(@arg into: -i --into conflicts_with[overwrite] +takes_value
"Restore to a given path")
)
).get_matches();
// load a config file
let config_path = opt_matches
.value_of("CONFIG")
.map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkprc"));
let cfg = load_config(&config_path);
// create the data dir if needed
let data_dir = opt_matches.value_of("DATADIR").map(Path::new)
.map(Path::to_path_buf)
.unwrap_or(std::env::home_dir().unwrap().join(".bkp"));
if let Err(e) = fs::metadata(&data_dir) {
if e.kind() == std::io::ErrorKind::NotFound {
if fs::create_dir(&data_dir).is_err() {
writeln!(std::io::stderr(), "bkp: Cannot create directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access directory: {}",
data_dir.display()).unwrap();
std::process::exit(1);
}
}
// open the key store
let kspath = data_dir.join("keystore");
let ks = match fs::metadata(&kspath) {
Ok(_) => match keys::Keystore::open(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot open keystore: {}", e.description());
std::process::exit(1);
}
},
Err(e) => if e.kind() == std::io::ErrorKind::NotFound {
match keys::Keystore::create(&kspath) {
Ok(k) => k,
Err(e) => {
err_write!("bkp: Cannot create keystore: {}", e.description());
std::process::exit(1);
}
}
} else {
writeln!(std::io::stderr(), "bkp: Cannot access keystore: {}",
kspath.display()).unwrap();
std::process::exit(1);
}
};
// parse global flags
let mut global_flags = GlobalOptions {
cfg: cfg,
verbose: opt_matches.is_present("VERBOSE"),
quiet: opt_matches.is_present("QUIET"),
data_dir: data_dir,
keystore: ks
};
// figure out what to do
match opt_matches.subcommand() {
("", _) => { println!("bkp: No subcommand specified"); },
("dest", Some(m)) => do_dest(m, &mut global_flags),
("test", Some(m)) => do_test(m, &global_flags),
("stat", Some(m)) => do_stat(m, &global_flags),
("clean", Some(m)) => do_clean(m, &global_flags),
("snap", Some(m)) => do_snap(m, &global_flags),
("restore", Some(m)) => do_restore(m, &global_flags),
(_, _) => panic!("No subcommand handler found!")
}
} | identifier_body |
|
show_solution.py | #!/usr/bin/env python3
import sys, os
import random
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
import math
from matplotlib.pyplot import arrow
import dubins
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, "utils")
sys.path.append(path_to_utils)
import figure_utils
import orienteering_utils
from orienteering_utils import ProblemType
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = "../sources/results/results.log"
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
#use nice latex fonts if latex is installed
#figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE:
print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png"
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max,-sys.float_info.max
minx, miny = sys.float_info.max,sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
|
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]
q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
# length_dub += math.ceil(path.path_length())
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ",", q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.20, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
# offset = 0.08
fig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show()
| node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2) | conditional_block |
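
# Illustrative sketch (not part of show_solution.py): the minimal shape of the
# record consumed by the plotting code above, with hypothetical values. Only the
# key names are taken from the script; real records come from
# orienteering_utils.parse_op_log(RESULT_FILE), and the problem type is chosen
# purely from the PROBLEM_FILE path.
example_record = {
    "PROBLEM_FILE": "../datasets/sop/example_instance.txt",  # hypothetical path
    "RESULT_TARGET_IDS": [0, 12, 7, 3],   # visited node ids, in tour order
    "RESULT_CLUSTER_IDS": [0, 4, 2, 1],   # set/cluster id of each visited node
    "REWARDS": 42.0,                      # collected profit (hypothetical)
}
if "datasets/sop/" in example_record["PROBLEM_FILE"]:
    example_problem = "SOP"
elif "datasets/dop_sop_dataset/" in example_record["PROBLEM_FILE"]:
    example_problem = "DOP"
elif "datasets/opn_sop_dataset/" in example_record["PROBLEM_FILE"]:
    example_problem = "OPN"
else:
    example_problem = "UNKNOWN"
print(example_problem)  # -> "SOP" for the hypothetical path above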
show_solution.py | #!/usr/bin/env python3
import sys, os
import random
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import shapely.geometry as geometry
from shapely.ops import cascaded_union, polygonize
import math
from matplotlib.pyplot import arrow
import dubins
this_script_path = os.path.dirname(__file__)
path_to_utils = os.path.join(this_script_path, "utils")
sys.path.append(path_to_utils)
import figure_utils
import orienteering_utils
from orienteering_utils import ProblemType
legend_font_size = 24
tick_font_size = 20
NUM_POINTS_TO_GEN = 16
SCATTER_SIZE = 80
FIG_HEIGHT = 7.5
SHOW_FIGURE = True
RESULT_FILE = "../sources/results/results.log"
RESULT_FILE = os.path.join(this_script_path, RESULT_FILE)
#use nice latex fonts if latex is installed
#figure_utils.configure_latex_fonts_latex()
data_vns_sop = orienteering_utils.parse_op_log(RESULT_FILE)
print("using the last results")
record = data_vns_sop[-1]
print("record", record)
problem_type = ProblemType.UNKNOWN
PROBLEM_FILE = record['PROBLEM_FILE']
PROBLEM_FILE = os.path.join(this_script_path, PROBLEM_FILE)
if "datasets/sop/" in PROBLEM_FILE: |
elif "datasets/dop_sop_dataset/" in PROBLEM_FILE:
print("showing DOP")
problem_type = ProblemType.DOP
SAVE_TO_FIGURE = "solution_dop.png"
elif "datasets/opn_sop_dataset/" in PROBLEM_FILE:
print("showing OPN")
problem_type = ProblemType.OPN
SAVE_TO_FIGURE = "solution_opn.png"
else:
error("can not decide problem type based on problem file location")
problem_type = ProblemType.UNKNOWN
op = orienteering_utils.SetOrienteeringProblemDefinition()
op.load_problem_file(PROBLEM_FILE)
nodes = op.nodes
sets_prices = op.get_sets_prices()
sets = op.get_sets()
original_nodes = op.get_set_centers()
result_target_ids = record['RESULT_TARGET_IDS']
result_cluster_ids = record['RESULT_CLUSTER_IDS']
result_rewards = record['REWARDS']
print("problem loaded")
print("result_target_ids:", result_target_ids)
print("result_cluster_ids:", result_cluster_ids)
print("result_rewards", result_rewards)
print("sets_prices", sets_prices)
print("sets", sets)
print("nodes", nodes)
# for the DOP only
result_head_angs = []
sampling_heading = len(sets[0])
calc_reward = 0
for clust_idx in range(len(result_cluster_ids)):
clust = result_cluster_ids[clust_idx]
node = result_target_ids[clust_idx]
if problem_type == ProblemType.DOP:
node_inside_cluster = node - sets[clust][0]
# result_node_inside_cluster.append(node_inside_cluster)
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
result_head_angs.append(head_ang)
calc_reward += sets_prices[clust]
if node not in sets[clust]:
print("what the hell, it is not good")
print("calc_reward", calc_reward)
mycmap = plt.cm.get_cmap('RdYlBu_r')
maxx, maxy = -sys.float_info.max,-sys.float_info.max
minx, miny = sys.float_info.max,sys.float_info.max
circle_radiuses = np.ones([len(nodes), 1])
circle_radiuses1 = np.multiply(2.0, circle_radiuses)
nodes_w_rewards = np.zeros((len(nodes), 3))
if problem_type == ProblemType.DOP:
xses = [i[0] for i in original_nodes]
yses = [i[1] for i in original_nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(original_nodes), 3))
for nidx in range(len(original_nodes)):
nodes_w_rewards[nidx, 0] = original_nodes[nidx][0]
nodes_w_rewards[nidx, 1] = original_nodes[nidx][1]
nodes_w_rewards[nidx, 2] = sets_prices[nidx]
elif problem_type == ProblemType.OPN :
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
else:
xses = [nodes[i][0] for i in nodes]
yses = [nodes[i][1] for i in nodes]
maxx = max(xses)
minx = min(xses)
maxy = max(yses)
miny = min(yses)
nodes_w_rewards = np.zeros((len(nodes), 3))
for nidx in nodes:
nodes_w_rewards[nidx, 0] = nodes[nidx][0]
nodes_w_rewards[nidx, 1] = nodes[nidx][1]
for set_idx in sets:
if nidx in sets[set_idx]:
nodes_w_rewards[nidx, 2] = sets_prices[set_idx]
break
minrew = min(nodes_w_rewards[:, 2])
maxrew = max(nodes_w_rewards[:, 2])
cNorm = mpl.colors.Normalize(vmin=minrew, vmax=maxrew + 0.1 * (maxrew - minrew))
mycmapScalarMap = mpl.cm.ScalarMappable(norm=cNorm, cmap=mycmap)
fig_width = FIG_HEIGHT*(maxx-minx)/(maxy-miny)
figsize = (fig_width*0.9,FIG_HEIGHT)
print(figsize)
fig = plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
circles = figure_utils.circles(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], circle_radiuses1, c=nodes_w_rewards[:, 2] , alpha=0.05, edgecolor='black', linewidth=0.9, linestyle=':')
sc = plt.scatter(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], c=nodes_w_rewards[:, 2], cmap=mycmap , alpha=1.0, s=1, facecolor='black', lw=0.5)
plt.plot(nodes_w_rewards[:, 0], nodes_w_rewards[:, 1], 'ok', ms=4.0)
# print(nodes_w_rewards[:, 2])
if problem_type == ProblemType.DOP:
for nidx1 in range(len(nodes_w_rewards)):
points = []
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for hind in range(sampling_heading):
head_ang = math.pi + (2 * math.pi * hind) / sampling_heading
arrow_len = 30
arrow(node1[0], node1[1], arrow_len * math.cos(head_ang), arrow_len * math.sin(head_ang))
set_rew = nodes_w_rewards[nidx1, 2]
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(40), fc=color)
elif problem_type == ProblemType.OPN:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
else:
for set_idx in reversed(sorted(sets.keys())):
points = []
set_rew = sets_prices[set_idx]
for nidx1 in sets[set_idx]:
node1 = nodes_w_rewards[nidx1, :]
points.append([node1[0], node1[1]])
for nidx2 in sets[set_idx]:
if(nidx1 != nidx2):
node2 = nodes_w_rewards[nidx2, :]
# plt.plot([node1[0], node2[0] ], [node1[1], node2[1] ], '-k', lw=0.2)
alpha = 0.0
concave_hull = figure_utils.alpha_shape(points, alpha=alpha)
color = mycmapScalarMap.to_rgba(set_rew)
figure_utils.plot_polygon(concave_hull.buffer(25), fc=color)
for node_idx in range(1, len(result_target_ids)):
if problem_type == ProblemType.DOP:
step_size = 20
turning_radius = op.dubins_radius
node = result_cluster_ids[node_idx]
node_prew = result_cluster_ids[node_idx - 1]
q_start = [nodes_w_rewards[node, 0], nodes_w_rewards[node, 1], result_head_angs[node_idx]]
q_end = [nodes_w_rewards[node_prew][0], nodes_w_rewards[node_prew][1], result_head_angs[node_idx - 1]]
path = dubins.shortest_path(q_start, q_end, turning_radius)
qs, _ = path.sample_many(step_size)
# length_dub += math.ceil(path.path_length())
xses = [item[0] for item in qs]
yses = [item[1] for item in qs]
print(node_prew, '->', node, ",", q_start, '->', q_end)
plt.plot(xses, yses, '-g', lw=1.6)
elif problem_type == ProblemType.OPN:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
else:
node = result_target_ids[node_idx]
node_prew = result_target_ids[node_idx - 1]
node_pos = [nodes[node][0], nodes[node][1]]
node_pos_prew = [nodes[node_prew][0], nodes[node_prew][1]]
print(node_prew, '->', node, ",", node_pos_prew, '->', node_pos)
plt.plot([node_pos_prew[0], node_pos[0] ], [node_pos_prew[1], node_pos[1] ], '-g', lw=1.6)
ax = plt.gca()
ax.axis('equal')
figure_utils.no_axis(ax)
cbar_position = [0.20, 0.05, 0.6, 0.03]
cbar_ax = fig.add_axes(cbar_position)
cb = plt.colorbar(sc, cax=cbar_ax, orientation='horizontal')
cb.ax.tick_params(labelsize=tick_font_size)
cb.set_label('profit', labelpad=-65.0, y=0.8, fontsize=legend_font_size)
# offset = 0.08
fig.subplots_adjust(left=-0.035, right=1.035 , top=1.07 , bottom=0.0)
plt.savefig(SAVE_TO_FIGURE, dpi=300)
if SHOW_FIGURE:
plt.show() | print("showing SOP")
problem_type = ProblemType.SOP
SAVE_TO_FIGURE = "solution_sop.png" | random_line_split |
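
# Illustrative sketch (not part of show_solution.py): how the DOP branch above
# turns a node's index inside its cluster into a heading angle. The cluster
# layout below is hypothetical; the formula is the one used in the script.
import math

sampling_heading = 8        # assume 8 heading samples per target set
cluster_first_node = 40     # hypothetical id of the first node in the cluster
node = 42                   # hypothetical selected node id in that cluster
node_inside_cluster = node - cluster_first_node               # -> 2
head_ang = math.pi + (2 * math.pi * node_inside_cluster) / sampling_heading
# index 0 maps to 180 deg and every further index adds 360/8 = 45 deg,
# so index 2 yields 270 deg
assert math.isclose(math.degrees(head_ang) % 360.0, 270.0)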
pager.py | #!/usr/bin/python2.7
# Copyright 2012 Room77, Inc.
# Author: Uygar Oztekin
import os
import sys
import re
import argparse
import datetime
import json
import string
class Pager:
"""
  Cronable pager script for alerts. Monitors emails sent to the specified address.
  For each email with a subject that matches the criteria, monitors the thread.
  * For the initial email, sends a message to this week's primary contact.
  * If anyone replies to the original thread (e.g. the primary contact), the alert
  associated with that thread is muted.
  * If the script is run a second time and there is still no reply, the backup
  contact is alerted and the alert is muted.
  Temporary status files between runs are stored under status_dir. Hence the
  user that runs the pager must be able to create files under that directory.
"""
config_file = os.path.dirname(os.path.realpath(__file__))+"/pager_config.txt"
status_dir = "/tmp/pager/"
status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list)
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1
def Run(self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
parser = argparse.ArgumentParser(description="Handle pager rotations and alerts.");
parser.add_argument("--info", action = "store_true", help="Outputs active pager duty list and current primary and backup.");
parser.add_argument("--mail_info", action = "store_true", help="Mails active pager duty list and current primary and backup to the specified email address.");
parser.add_argument("--dry_run", action = "store_true", help = "Do not wake people up. Output information to console instead.");
parser.add_argument("--call", type = int, help = "Offset of person on pager duty to call. 0 means primary, 1 means backup, 2 means secondary backup etc.");
parser.add_argument("--sender", type = str, default = "", help="In mail_info mode, send the email from this address.");
parser.add_argument("--receiver", type = str, default = "", help="In mail_info mode, send the email to this address.");
parser.add_argument("--offset_days", type = int, default = 0, help = "Offset to add to current time. This can be used to compute primary / backup at a future / past time.");
parser.add_argument("--msg", type = str, default = "An alert has been issued. Please check your email.", help = "Message to send (for SMS portion)");
parser.add_argument("--monitor_email", type = str, default = "", help = "Email address to monitor for the alarm pattern. Needs to be a google account with gvoice.");
parser.add_argument("--monitor_pass", type = str, default = "", help = "Password for the monitor email address.");
parser.add_argument("--monitor_phone", type = str, default = "", help = "Google voice phone number associated with the account. Example: 15551231234");
args = parser.parse_args()
pager = Pager() | pager.monitor_pass = args.monitor_pass
pager.monitor_phone = args.monitor_phone
if args.dry_run: pager.dry_run = 1
if args.info: return pager.Info()
if args.mail_info: return pager.MailInfo(args.sender, args.receiver)
if args.call != None: return pager.SendAlert(args.msg, args.call)
return pager.Run()
if __name__ == '__main__':
sys.exit(main()) | pager.Init()
pager.offset_days = args.offset_days
pager.monitor_email = args.monitor_email | random_line_split |
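
# Illustrative sketch (not part of pager.py): the week-rotation arithmetic used
# by Pager._GetPrimary above, restated standalone so it can be checked directly.
# 1970-01-01 was a Thursday, so adding 3 days makes every Monday a multiple of 7;
# integer-dividing by 7 then gives a week counter that advances each Monday.
import datetime

def primary_index(n_contacts, today, offset_days=0):
    days = (today - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
    return days // 7 % n_contacts

monday = datetime.datetime(2012, 1, 2)      # 2012-01-02 was a Monday
days = (monday - datetime.datetime.utcfromtimestamp(0)).days + 3
assert days % 7 == 0                        # Mondays land exactly on week boundaries
# with 3 contacts the primary advances by one position each week
assert primary_index(3, monday) != primary_index(3, monday + datetime.timedelta(days=7))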
pager.py | #!/usr/bin/python2.7
# Copyright 2012 Room77, Inc.
# Author: Uygar Oztekin
import os
import sys
import re
import argparse
import datetime
import json
import string
class Pager:
"""
  Cronable pager script for alerts. Monitors emails sent to the specified address.
  For each email with a subject that matches the criteria, monitors the thread.
  * For the initial email, sends a message to this week's primary contact.
  * If anyone replies to the original thread (e.g. the primary contact), the alert
  associated with that thread is muted.
  * If the script is run a second time and there is still no reply, the backup
  contact is alerted and the alert is muted.
  Temporary status files between runs are stored under status_dir. Hence the
  user that runs the pager must be able to create files under that directory.
"""
config_file = os.path.dirname(os.path.realpath(__file__))+"/pager_config.txt"
status_dir = "/tmp/pager/"
status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list)
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
|
def Run(self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
parser = argparse.ArgumentParser(description="Handle pager rotations and alerts.");
parser.add_argument("--info", action = "store_true", help="Outputs active pager duty list and current primary and backup.");
parser.add_argument("--mail_info", action = "store_true", help="Mails active pager duty list and current primary and backup to the specified email address.");
parser.add_argument("--dry_run", action = "store_true", help = "Do not wake people up. Output information to console instead.");
parser.add_argument("--call", type = int, help = "Offset of person on pager duty to call. 0 means primary, 1 means backup, 2 means secondary backup etc.");
parser.add_argument("--sender", type = str, default = "", help="In mail_info mode, send the email from this address.");
parser.add_argument("--receiver", type = str, default = "", help="In mail_info mode, send the email to this address.");
parser.add_argument("--offset_days", type = int, default = 0, help = "Offset to add to current time. This can be used to compute primary / backup at a future / past time.");
parser.add_argument("--msg", type = str, default = "An alert has been issued. Please check your email.", help = "Message to send (for SMS portion)");
parser.add_argument("--monitor_email", type = str, default = "", help = "Email address to monitor for the alarm pattern. Needs to be a google account with gvoice.");
parser.add_argument("--monitor_pass", type = str, default = "", help = "Password for the monitor email address.");
parser.add_argument("--monitor_phone", type = str, default = "", help = "Google voice phone number associated with the account. Example: 15551231234");
args = parser.parse_args()
pager = Pager()
pager.Init()
pager.offset_days = args.offset_days
pager.monitor_email = args.monitor_email
pager.monitor_pass = args.monitor_pass
pager.monitor_phone = args.monitor_phone
if args.dry_run: pager.dry_run = 1
if args.info: return pager.Info()
if args.mail_info: return pager.MailInfo(args.sender, args.receiver)
if args.call != None: return pager.SendAlert(args.msg, args.call)
return pager.Run()
if __name__ == '__main__':
sys.exit(main())
| import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1 | conditional_block |
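
# Illustrative usage sketch (not part of pager.py): the class docstring above
# describes a cron-driven workflow -- page the primary when an ALARM thread first
# appears, page the backup on the next run if nobody replied. A hypothetical
# crontab entry checking every 15 minutes could look like this (paths, account
# and password are assumptions, not taken from the repository):
#
#   */15 * * * *  /usr/bin/python2.7 /opt/pager/pager.py \
#       --monitor_email [email protected] --monitor_pass SECRET \
#       --monitor_phone 15551231234 >> /var/log/pager.log 2>&1
#
# A dry run from a shell prints who would be paged without sending SMS or
# placing calls:
#
#   ./pager.py --dry_run --monitor_email [email protected] --monitor_pass SECRET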
pager.py | #!/usr/bin/python2.7
# Copyright 2012 Room77, Inc.
# Author: Uygar Oztekin
import os
import sys
import re
import argparse
import datetime
import json
import string
class Pager:
"""
  Cronable pager script for alerts. Monitors emails sent to the specified address.
  For each email with a subject that matches the criteria, monitors the thread.
  * For the initial email, sends a message to this week's primary contact.
  * If anyone replies to the original thread (e.g. the primary contact), the alert
  associated with that thread is muted.
  * If the script is run a second time and there is still no reply, the backup
  contact is alerted and the alert is muted.
  Temporary status files between runs are stored under status_dir. Hence the
  user that runs the pager must be able to create files under that directory.
"""
config_file = os.path.dirname(os.path.realpath(__file__))+"/pager_config.txt"
status_dir = "/tmp/pager/"
status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, number of days from some monday. mod 7 of this number
# is 0 if it is a monday. Rotation occurs at 00:00, every monday.
|
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
import code
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1
def Run(self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
parser = argparse.ArgumentParser(description="Handle pager rotations and alerts.");
parser.add_argument("--info", action = "store_true", help="Outputs active pager duty list and current primary and backup.");
parser.add_argument("--mail_info", action = "store_true", help="Mails active pager duty list and current primary and backup to the specified email address.");
parser.add_argument("--dry_run", action = "store_true", help = "Do not wake people up. Output information to console instead.");
parser.add_argument("--call", type = int, help = "Offset of person on pager duty to call. 0 means primary, 1 means backup, 2 means secondary backup etc.");
parser.add_argument("--sender", type = str, default = "", help="In mail_info mode, send the email from this address.");
parser.add_argument("--receiver", type = str, default = "", help="In mail_info mode, send the email to this address.");
parser.add_argument("--offset_days", type = int, default = 0, help = "Offset to add to current time. This can be used to compute primary / backup at a future / past time.");
parser.add_argument("--msg", type = str, default = "An alert has been issued. Please check your email.", help = "Message to send (for SMS portion)");
parser.add_argument("--monitor_email", type = str, default = "", help = "Email address to monitor for the alarm pattern. Needs to be a google account with gvoice.");
parser.add_argument("--monitor_pass", type = str, default = "", help = "Password for the monitor email address.");
parser.add_argument("--monitor_phone", type = str, default = "", help = "Google voice phone number associated with the account. Example: 15551231234");
args = parser.parse_args()
pager = Pager()
pager.Init()
pager.offset_days = args.offset_days
pager.monitor_email = args.monitor_email
pager.monitor_pass = args.monitor_pass
pager.monitor_phone = args.monitor_phone
if args.dry_run: pager.dry_run = 1
if args.info: return pager.Info()
if args.mail_info: return pager.MailInfo(args.sender, args.receiver)
if args.call is not None: return pager.SendAlert(args.msg, args.call)
return pager.Run()
if __name__ == '__main__':
sys.exit(main())
| days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list) | identifier_body |
pager.py | #!/usr/bin/python2.7
# Copyright 2012 Room77, Inc.
# Author: Uygar Oztekin
import os
import sys
import re
import argparse
import datetime
import json
import string
class Pager:
"""
Cronable pager script for alerts. Monitors emails sent to the specified address.
For each email with a subject that matches the criteria, monitors the thread.
* For initial email, sends a message to this week's primary contact.
* If anyone replies to the original thread (e.g. the primary contact), alert
associated with that thread is muted.
* If the script is run a second time and there is still no reply, backup
contact is alerted and alert is muted.
Temporary status files between runs are stored under status_dir, so the user
that runs the pager must be able to create files under that directory.
"""
config_file = os.path.dirname(os.path.realpath(__file__))+"/pager_config.txt"
status_dir = "/tmp/pager/"
status_file = status_dir + "pager_status.json"
monitor_email = ""
monitor_pass = ""
monitor_phone = ""
alert_match_pattern = ".*ALARM.*"
reply_match_pattern = "^R[eE]:.*ALARM.*"
dry_run = 0
offset_days = 0
_status = dict()
_active_list = []
_sent_one = 0
class Status:
NEW = "new"
OLD = "old"
REPLIED = "replied"
def Init(self):
data = open(self.config_file)
self._active_list = []
for line in data.readlines() :
if re.match("^#", line) : continue
line = line.strip()
m = re.match(r"(?P<phone>[\d]{3}-[\d]{3}-[\d]{4})[\t](?P<email>\w+[@][\w\.]+)", line)
self._active_list += [(m.group("phone").translate(None, "-+"), m.group("email"))]
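# Illustrative only: the regex above expects one contact per line in pager_config.txt,
# a phone number and an email address separated by a single TAB, with '#' lines
# treated as comments. A hypothetical entry (not a real contact):
#
# 555-123-1234<TAB>alice@example.com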
def _InfoString(self):
out = ""
list = self._active_list
out += "Pager rotation contains " + str(len(list)) + " contacts:\n"
for p in list:
out += p[0] + "\t" + p[1] +"\n"
primary = self._GetPrimary();
backup = (primary + 1) % len(list)
out += "\nCurrent contacts:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
out += "\n\nPlease turn your phones to MAX volume to ensure you receive pages."
if self.offset_days != 0:
primary = self._GetPrimary(self.offset_days);
backup = (primary + 1) % len(list)
out += "\n\nContacts " + str(self.offset_days) + " days from now:"
out += "\nPrimary: " + str(list[primary])
out += "\nBackup: " + str(list[backup])
return out
def Info(self):
print(self._InfoString())
def MailInfo(self, sender, receiver):
import smtplib
# sending html email described here:
# http://stackoverflow.com/questions/882712/sending-html-email-in-python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
msg = MIMEMultipart('alternative')
msg['Subject'] = "Pager rotation notification"
msg['From'] = sender
msg['To'] = receiver
html = """\
<html>
<head></head>
<body>
<p>""" + re.sub("\n", "<br>", self._InfoString()) + """</p>
<p>Pager rotation happens every monday at 00:00.</p>
<p>If the primary / backup contacts are unavailable, please
<ol>
<li>modify pager_config.txt</li>
<li>test your changes via ./pager.py --info</li>
<li>check them in and git push changes to be reflected</li>
<li>run ./pager.py --mail_info to broadcast the new info</li>
</ol>
</p>
</body>
</html>"""
# Record the MIME types of both parts - text/plain and text/html.
#part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
#msg.attach(part1)
msg.attach(part2)
smtp = smtplib.SMTP('localhost')
smtp.sendmail(sender, receiver, msg.as_string());
def SendAlert(self, msg, index = 0):
list = self._active_list
n = (self._GetPrimary() + index) % len(list)
print("Paging " + list[n][1] + " with message: " + msg)
if not self.dry_run: self._SendAlertToPhone(list[n][0], msg)
def _PrintStatus(self):
if len(self._status) > 0: print("Status : Message ID")
for key, value in list(self._status.items()):
print(value["status"] + " : " + key)
def _ProcessStatus(self):
more = "\nSee https://docs.google.com/a/room77.com/document/d/1YMrE5nM4aTG65ah6B3G_TxOG3vEKs4E84yKc4iAxH38/edit"
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 0)
if value["status"] == self.Status.OLD:
self.SendAlert("Alert. Please check your email. Subject: " + value["subject"] + more, 1)
# print value["status"] + " : " + key
remove_list = [k for (k,v) in list(self._status.items()) if v["status"] == self.Status.OLD or v["status"] == self.Status.REPLIED ]
for key in remove_list: del(self._status[key])
def _ReadStatusFile(self):
if not os.path.exists(self.status_file): return
data = open(self.status_file).read()
if (data == None or data == ""): return
self._status = json.loads(data)
for key, value in list(self._status.items()):
if value["status"] == self.Status.NEW: self._status[key]["status"] = self.Status.OLD
def _WriteStatusFile(self):
if self.dry_run == 1: self._status.clear()
data = open(self.status_file, "w")
json.dump(self._status, data)
data.close()
def _ProcessMail(self, mid, rid, date, sender, subject, body):
if rid in self._status:
# Existing alert.
self._status[rid]["status"] = self.Status.REPLIED
elif mid not in self._status:
# New alert.
self._status[mid] = dict()
self._status[mid]["status"] = self.Status.NEW
self._status[mid]["date"] = date
self._status[mid]["subject"] = subject
self._status[mid]["body"] = body
if re.match(self.reply_match_pattern, subject):
print("At least one reply found muting current alarms")
self.dry_run = 1
def _FetchMails(self):
import poplib
mail = poplib.POP3_SSL("pop.gmail.com")
mail.user(self.monitor_email)
mail.pass_(self.monitor_pass)
n = len(mail.list()[1])
print("Found " + str(n) + " new emails.")
for i in range(n):
mid = rid = date = sender = subject = body = ""
body_started = 0
for line in mail.retr(i+1)[1]:
if body_started :
body = body + line + '\n'
else:
if re.match("^Message-ID: ", line, re.IGNORECASE) : mid = line[line.find(" ")+1:]
if re.match("^In-Reply-To: ", line, re.IGNORECASE) : rid = line[line.find(" ")+1:]
if re.match("^Subject: ", line, re.IGNORECASE) : subject = line[line.find(" ")+1:]
if re.match("^Date:", line, re.IGNORECASE) : date = line[line.find(" ")+1:]
if re.match("^From: ", line, re.IGNORECASE) : sender = line[line.find(" ")+1:]
if not body_started and re.match("^$", line) : body_started = 1
if re.match(self.alert_match_pattern, subject) : self._ProcessMail(mid, rid, date, sender, subject, body)
mail.dele(i+1)
mail.quit()
# self._PrintStatus()
self._ProcessStatus()
self._WriteStatusFile()
self._PrintStatus()
def _GetPrimary(self, offset_days = 0) :
# If offset_days is 0, this is the number of days since a reference Monday; its
# value mod 7 is 0 on Mondays. Dividing by 7 gives the week index, so the rotation
# advances at 00:00 every Monday.
days = (datetime.datetime.today() - datetime.datetime.utcfromtimestamp(0)).days + 3 + offset_days
return days // 7 % len(self._active_list)
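# Worked example (assuming a hypothetical 3-person rotation): if days is 17, then
# 17 // 7 == 2 full weeks have elapsed and 2 % 3 == 2 picks the third contact as
# primary; callers such as _InfoString compute the backup as the next index.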
def _SendAlertToPhone(self, phone, msg):
if not self._sent_one:
from googlevoice import Voice
voice = Voice()
voice.login(self.monitor_email, self.monitor_pass)
voice.send_sms(phone, msg) # Send an SMS.
voice.call(phone, self.monitor_phone, 3) # Call the person as well.
self._sent_one = 1
def | (self):
if not os.path.exists(self.status_dir):
os.makedirs(self.status_dir)
self._ReadStatusFile()
return self._FetchMails()
def main():
# Fill in the default values relevant for you to avoid adding the flags for each run.
parser = argparse.ArgumentParser(description="Handle pager rotations and alerts.");
parser.add_argument("--info", action = "store_true", help="Outputs active pager duty list and current primary and backup.");
parser.add_argument("--mail_info", action = "store_true", help="Mails active pager duty list and current primary and backup to the specified email address.");
parser.add_argument("--dry_run", action = "store_true", help = "Do not wake people up. Output information to console instead.");
parser.add_argument("--call", type = int, help = "Offset of person on pager duty to call. 0 means primary, 1 means backup, 2 means secondary backup etc.");
parser.add_argument("--sender", type = str, default = "", help="In mail_info mode, send the email from this address.");
parser.add_argument("--receiver", type = str, default = "", help="In mail_info mode, send the email to this address.");
parser.add_argument("--offset_days", type = int, default = 0, help = "Offset to add to current time. This can be used to compute primary / backup at a future / past time.");
parser.add_argument("--msg", type = str, default = "An alert has been issued. Please check your email.", help = "Message to send (for SMS portion)");
parser.add_argument("--monitor_email", type = str, default = "", help = "Email address to monitor for the alarm pattern. Needs to be a google account with gvoice.");
parser.add_argument("--monitor_pass", type = str, default = "", help = "Password for the monitor email address.");
parser.add_argument("--monitor_phone", type = str, default = "", help = "Google voice phone number associated with the account. Example: 15551231234");
args = parser.parse_args()
pager = Pager()
pager.Init()
pager.offset_days = args.offset_days
pager.monitor_email = args.monitor_email
pager.monitor_pass = args.monitor_pass
pager.monitor_phone = args.monitor_phone
if args.dry_run: pager.dry_run = 1
if args.info: return pager.Info()
if args.mail_info: return pager.MailInfo(args.sender, args.receiver)
if args.call is not None: return pager.SendAlert(args.msg, args.call)
return pager.Run()
if __name__ == '__main__':
sys.exit(main())
| Run | identifier_name |
main.go | // Copyright 2021 Optakt Labs OÜ
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package main
import (
"context"
"os"
"os/signal"
"runtime"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/prometheus/tsdb/wal"
"github.com/rs/zerolog"
"github.com/spf13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func m | ) {
os.Exit(run())
}
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
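// Illustrative only: a hypothetical invocation of this indexer; the binary name and
// all paths below are assumptions, only the flags come from the definitions above.
//
//   flow-dps-indexer -a -i /var/dps/index -d /var/flow/protocol \
//       -t /var/flow/execution -c /var/flow/bootstrap/root.checkpoint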
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics {
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.CollectRegisters),
mapper.WithTransition(mapper.StatusCollected, transitions.IndexRegisters),
mapper.WithTransition(mapper.StatusIndexed, transitions.ForwardHeight),
mapper.WithTransition(mapper.StatusForwarded, transitions.IndexChain),
)
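// Summary of the transitions registered above (an inference from the state and
// function names, not an original comment): an empty state is bootstrapped from the
// checkpoint, trie updates from the WAL are applied until the target commit is
// matched, the matched registers are collected and indexed, and the height is then
// forwarded before the chain data for that height is indexed; the cycle repeats.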
// This section launches the main executing components in their own
// goroutine, so they can run concurrently. Afterwards, we wait for an
// interrupt signal in order to proceed with the next section.
done := make(chan struct{})
failed := make(chan struct{})
go func() {
start := time.Now()
log.Info().Time("start", start).Msg("Flow DPS Indexer starting")
err := fsm.Run()
if err != nil {
log.Warn().Err(err).Msg("Flow DPS Indexer failed")
close(failed)
} else {
close(done)
}
finish := time.Now()
duration := finish.Sub(start)
log.Info().Time("finish", finish).Str("duration", duration.Round(time.Second).String()).Msg("Flow DPS Indexer stopped")
}()
// Start metrics output.
if flagMetrics {
mout.Run()
}
select {
case <-sig:
log.Info().Msg("Flow DPS Indexer stopping")
case <-done:
log.Info().Msg("Flow DPS Indexer done")
case <-failed:
log.Warn().Msg("Flow DPS Indexer aborted")
return failure
}
go func() {
<-sig
log.Warn().Msg("forcing exit")
os.Exit(1)
}()
// Stop metrics output.
if flagMetrics {
mout.Stop()
}
// The following code starts a shut down with a certain timeout and makes
// sure that the main executing components are shutting down within the
// allocated shutdown time. Otherwise, we will force the shutdown and log
// an error. We then wait for shutdown on each component to complete.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err = fsm.Stop(ctx)
if err != nil {
log.Error().Err(err).Msg("could not stop indexer")
return failure
}
return success
}
| ain( | identifier_name |
main.go | // Copyright 2021 Optakt Labs OÜ
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package main
import (
"context"
"os"
"os/signal"
"runtime"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/prometheus/tsdb/wal"
"github.com/rs/zerolog"
"github.com/spf13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func main() {
os.Exit(run())
}
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics { | storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.CollectRegisters),
mapper.WithTransition(mapper.StatusCollected, transitions.IndexRegisters),
mapper.WithTransition(mapper.StatusIndexed, transitions.ForwardHeight),
mapper.WithTransition(mapper.StatusForwarded, transitions.IndexChain),
)
// This section launches the main executing components in their own
// goroutine, so they can run concurrently. Afterwards, we wait for an
// interrupt signal in order to proceed with the next section.
done := make(chan struct{})
failed := make(chan struct{})
go func() {
start := time.Now()
log.Info().Time("start", start).Msg("Flow DPS Indexer starting")
err := fsm.Run()
if err != nil {
log.Warn().Err(err).Msg("Flow DPS Indexer failed")
close(failed)
} else {
close(done)
}
finish := time.Now()
duration := finish.Sub(start)
log.Info().Time("finish", finish).Str("duration", duration.Round(time.Second).String()).Msg("Flow DPS Indexer stopped")
}()
// Start metrics output.
if flagMetrics {
mout.Run()
}
select {
case <-sig:
log.Info().Msg("Flow DPS Indexer stopping")
case <-done:
log.Info().Msg("Flow DPS Indexer done")
case <-failed:
log.Warn().Msg("Flow DPS Indexer aborted")
return failure
}
go func() {
<-sig
log.Warn().Msg("forcing exit")
os.Exit(1)
}()
// Stop metrics output.
if flagMetrics {
mout.Stop()
}
// The following code starts a shut down with a certain timeout and makes
// sure that the main executing components are shutting down within the
// allocated shutdown time. Otherwise, we will force the shutdown and log
// an error. We then wait for shutdown on each component to complete.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err = fsm.Stop(ctx)
if err != nil {
log.Error().Err(err).Msg("could not stop indexer")
return failure
}
return success
}
|
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
| conditional_block |
main.go | // Copyright 2021 Optakt Labs OÜ
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package main
import (
"context"
"os"
"os/signal"
"runtime"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/prometheus/tsdb/wal"
"github.com/rs/zerolog"
"github.com/spf13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func main() { |
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics {
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.CollectRegisters),
mapper.WithTransition(mapper.StatusCollected, transitions.IndexRegisters),
mapper.WithTransition(mapper.StatusIndexed, transitions.ForwardHeight),
mapper.WithTransition(mapper.StatusForwarded, transitions.IndexChain),
)
// This section launches the main executing components in their own
// goroutine, so they can run concurrently. Afterwards, we wait for an
// interrupt signal in order to proceed with the next section.
done := make(chan struct{})
failed := make(chan struct{})
go func() {
start := time.Now()
log.Info().Time("start", start).Msg("Flow DPS Indexer starting")
err := fsm.Run()
if err != nil {
log.Warn().Err(err).Msg("Flow DPS Indexer failed")
close(failed)
} else {
close(done)
}
finish := time.Now()
duration := finish.Sub(start)
log.Info().Time("finish", finish).Str("duration", duration.Round(time.Second).String()).Msg("Flow DPS Indexer stopped")
}()
// Start metrics output.
if flagMetrics {
mout.Run()
}
select {
case <-sig:
log.Info().Msg("Flow DPS Indexer stopping")
case <-done:
log.Info().Msg("Flow DPS Indexer done")
case <-failed:
log.Warn().Msg("Flow DPS Indexer aborted")
return failure
}
go func() {
<-sig
log.Warn().Msg("forcing exit")
os.Exit(1)
}()
// Stop metrics output.
if flagMetrics {
mout.Stop()
}
// The following code starts a shut down with a certain timeout and makes
// sure that the main executing components are shutting down within the
// allocated shutdown time. Otherwise, we will force the shutdown and log
// an error. We then wait for shutdown on each component to complete.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err = fsm.Stop(ctx)
if err != nil {
log.Error().Err(err).Msg("could not stop indexer")
return failure
}
return success
}
|
os.Exit(run())
}
| identifier_body |
main.go | // Copyright 2021 Optakt Labs OÜ
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
package main
import (
"context"
"os"
"os/signal"
"runtime"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/prometheus/tsdb/wal"
"github.com/rs/zerolog"
"github.com/spf13/pflag"
"github.com/optakt/flow-dps/codec/zbor"
"github.com/optakt/flow-dps/metrics/output"
"github.com/optakt/flow-dps/metrics/rcrowley"
"github.com/optakt/flow-dps/models/dps"
"github.com/optakt/flow-dps/service/chain"
"github.com/optakt/flow-dps/service/feeder"
"github.com/optakt/flow-dps/service/forest"
"github.com/optakt/flow-dps/service/index"
"github.com/optakt/flow-dps/service/loader"
"github.com/optakt/flow-dps/service/mapper"
"github.com/optakt/flow-dps/service/metrics"
"github.com/optakt/flow-dps/service/storage"
)
const (
success = 0
failure = 1
)
func main() {
os.Exit(run())
}
func run() int {
// Signal catching for clean shutdown.
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
// Command line parameter initialization.
var (
flagCheckpoint string
flagData string
flagForce bool
flagIndex string
flagIndexAll bool
flagIndexCollections bool
flagIndexGuarantees bool
flagIndexCommit bool
flagIndexEvents bool
flagIndexHeader bool
flagIndexPayloads bool
flagIndexResults bool
flagIndexTransactions bool
flagIndexSeals bool
flagLevel string
flagMetrics bool
flagMetricsInterval time.Duration
flagSkipBootstrap bool
flagTrie string
)
pflag.StringVarP(&flagCheckpoint, "checkpoint", "c", "", "checkpoint file for state trie")
pflag.StringVarP(&flagData, "data", "d", "", "database directory for protocol data")
pflag.BoolVarP(&flagForce, "force", "f", false, "overwrite existing index database")
pflag.StringVarP(&flagIndex, "index", "i", "index", "database directory for state index")
pflag.BoolVarP(&flagIndexAll, "index-all", "a", false, "index everything")
pflag.BoolVar(&flagIndexCollections, "index-collections", false, "index collections")
pflag.BoolVar(&flagIndexGuarantees, "index-guarantees", false, "index collection guarantees")
pflag.BoolVar(&flagIndexCommit, "index-commits", false, "index commits")
pflag.BoolVar(&flagIndexEvents, "index-events", false, "index events")
pflag.BoolVar(&flagIndexHeader, "index-headers", false, "index headers")
pflag.BoolVar(&flagIndexPayloads, "index-payloads", false, "index payloads")
pflag.BoolVar(&flagIndexResults, "index-results", false, "index transaction results")
pflag.BoolVar(&flagIndexTransactions, "index-transactions", false, "index transactions")
pflag.BoolVar(&flagIndexSeals, "index-seals", false, "index seals")
pflag.StringVarP(&flagLevel, "level", "l", "info", "log output level")
pflag.BoolVarP(&flagMetrics, "metrics", "m", false, "enable metrics collection and output")
pflag.DurationVar(&flagMetricsInterval, "metrics-interval", 5*time.Minute, "defines the interval of metrics output to log")
pflag.BoolVar(&flagSkipBootstrap, "skip-bootstrap", false, "enable skipping checkpoint register payloads indexing")
pflag.StringVarP(&flagTrie, "trie", "t", "", "data directory for state ledger")
pflag.Parse()
// Increase the GOMAXPROCS value in order to use the full IOPS available, see:
// https://groups.google.com/g/golang-nuts/c/jPb_h3TvlKE
_ = runtime.GOMAXPROCS(128)
// Logger initialization.
zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() }
log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.DebugLevel)
level, err := zerolog.ParseLevel(flagLevel)
if err != nil {
log.Error().Str("level", flagLevel).Err(err).Msg("could not parse log level")
return failure
}
log = log.Level(level)
// Ensure that at least one index is specified.
if !flagIndexAll && !flagIndexCommit && !flagIndexHeader && !flagIndexPayloads && !flagIndexCollections &&
!flagIndexGuarantees && !flagIndexTransactions && !flagIndexResults && !flagIndexEvents && !flagIndexSeals {
log.Error().Str("level", flagLevel).Msg("no indexing option specified, use -a/--all to build all indexes")
pflag.Usage()
return failure
}
// Fail if IndexAll is specified along with other index flags, as this would most likely mean that the user does
// not understand what they are doing.
if flagIndexAll && (flagIndexCommit || flagIndexHeader || flagIndexPayloads || flagIndexGuarantees ||
flagIndexCollections || flagIndexTransactions || flagIndexResults || flagIndexEvents || flagIndexSeals) {
log.Error().Str("level", flagLevel).Msg("-a/--all is mutually exclusive with specific indexing flags")
pflag.Usage()
return failure
}
// Open index database.
db, err := badger.Open(dps.DefaultOptions(flagIndex))
if err != nil {
log.Error().Str("index", flagIndex).Err(err).Msg("could not open index DB")
return failure
}
defer db.Close()
// Open protocol state database.
data, err := badger.Open(dps.DefaultOptions(flagData))
if err != nil {
log.Error().Err(err).Msg("could not open blockchain database")
return failure
}
defer data.Close()
// We initialize a metrics logger regardless of whether metrics are enabled;
// it will just do nothing if there are no registered metrics.
mout := output.New(log, flagMetricsInterval)
// The storage library is initialized with a codec and provides functions to
// interact with a Badger database while encoding and compressing
// transparently.
var codec dps.Codec
codec, err = zbor.NewCodec()
if err != nil {
log.Error().Err(err).Msg("could not initialize storage codec")
return failure
}
if flagMetrics {
size := rcrowley.NewSize("store")
mout.Register(size)
codec = metrics.NewCodec(codec, size)
}
storage := storage.New(codec)
// Check if index already exists.
_, err = index.NewReader(db, storage).First()
indexExists := err == nil
if indexExists && !flagForce {
log.Error().Err(err).Msg("index already exists, manually delete it or use (-f, --force) to overwrite it")
return failure
}
// The loader component is responsible for loading and decoding the checkpoint.
load := loader.New(
loader.WithCheckpointPath(flagCheckpoint),
)
// The chain is responsible for reading blockchain data from the protocol state.
var disk dps.Chain
disk = chain.FromDisk(data)
if flagMetrics {
time := rcrowley.NewTime("read")
mout.Register(time)
disk = metrics.NewChain(disk, time)
}
// Feeder is responsible for reading the write-ahead log of the execution state.
segments, err := wal.NewSegmentsReader(flagTrie)
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not open segments reader")
return failure
}
feed, err := feeder.FromDisk(wal.NewReader(segments))
if err != nil {
log.Error().Str("trie", flagTrie).Err(err).Msg("could not initialize feeder")
return failure
}
// Writer is responsible for writing the index data to the index database.
index := index.NewWriter(db, storage)
defer func() {
err := index.Close()
if err != nil {
log.Error().Err(err).Msg("could not close index")
}
}()
write := dps.Writer(index)
if flagMetrics {
time := rcrowley.NewTime("write")
mout.Register(time)
write = metrics.NewWriter(write, time)
}
// Initialize the transitions with the dependencies and add them to the FSM.
transitions := mapper.NewTransitions(log, load, disk, feed, write,
mapper.WithIndexCommit(flagIndexAll || flagIndexCommit),
mapper.WithIndexHeader(flagIndexAll || flagIndexHeader),
mapper.WithIndexCollections(flagIndexAll || flagIndexCollections),
mapper.WithIndexGuarantees(flagIndexAll || flagIndexGuarantees),
mapper.WithIndexTransactions(flagIndexAll || flagIndexTransactions),
mapper.WithIndexResults(flagIndexAll || flagIndexResults),
mapper.WithIndexEvents(flagIndexAll || flagIndexEvents),
mapper.WithIndexPayloads(flagIndexAll || flagIndexPayloads),
mapper.WithIndexSeals(flagIndexAll || flagIndexSeals),
mapper.WithSkipBootstrap(flagSkipBootstrap),
)
forest := forest.New()
state := mapper.EmptyState(forest)
fsm := mapper.NewFSM(state,
mapper.WithTransition(mapper.StatusEmpty, transitions.BootstrapState),
mapper.WithTransition(mapper.StatusUpdating, transitions.UpdateTree),
mapper.WithTransition(mapper.StatusMatched, transitions.CollectRegisters),
mapper.WithTransition(mapper.StatusCollected, transitions.IndexRegisters),
mapper.WithTransition(mapper.StatusIndexed, transitions.ForwardHeight),
mapper.WithTransition(mapper.StatusForwarded, transitions.IndexChain), | // goroutine, so they can run concurrently. Afterwards, we wait for an
// interrupt signal in order to proceed with the next section.
done := make(chan struct{})
failed := make(chan struct{})
go func() {
start := time.Now()
log.Info().Time("start", start).Msg("Flow DPS Indexer starting")
err := fsm.Run()
if err != nil {
log.Warn().Err(err).Msg("Flow DPS Indexer failed")
close(failed)
} else {
close(done)
}
finish := time.Now()
duration := finish.Sub(start)
log.Info().Time("finish", finish).Str("duration", duration.Round(time.Second).String()).Msg("Flow DPS Indexer stopped")
}()
// Start metrics output.
if flagMetrics {
mout.Run()
}
select {
case <-sig:
log.Info().Msg("Flow DPS Indexer stopping")
case <-done:
log.Info().Msg("Flow DPS Indexer done")
case <-failed:
log.Warn().Msg("Flow DPS Indexer aborted")
return failure
}
go func() {
<-sig
log.Warn().Msg("forcing exit")
os.Exit(1)
}()
// Stop metrics output.
if flagMetrics {
mout.Stop()
}
// The following code starts a shut down with a certain timeout and makes
// sure that the main executing components are shutting down within the
// allocated shutdown time. Otherwise, we will force the shutdown and log
// an error. We then wait for shutdown on each component to complete.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
err = fsm.Stop(ctx)
if err != nil {
log.Error().Err(err).Msg("could not stop indexer")
return failure
}
return success
} | )
// This section launches the main executing components in their own | random_line_split |
fixtures.go | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"log"
"net"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"k8s.io/component-base/featuregate"
"antrea.io/antrea/pkg/agent/config"
)
func skipIfNotBenchmarkTest(tb testing.TB) {
if !testOptions.withBench {
tb.Skipf("Skipping benchmark test: %s", tb.Name())
}
}
func skipIfNotAntreaIPAMTest(tb testing.TB) {
if !testOptions.enableAntreaIPAM {
tb.Skipf("Skipping AntreaIPAM test: %s", tb.Name())
}
}
func skipIfAntreaIPAMTest(tb testing.TB) {
if testOptions.enableAntreaIPAM {
tb.Skipf("Skipping test when running AntreaIPAM: %s", tb.Name())
}
}
func skipIfProviderIs(tb testing.TB, name string, reason string) {
if testOptions.providerName == name {
tb.Skipf("Skipping test for the '%s' provider: %s", name, reason)
}
}
func skipIfNotRequired(tb testing.TB, keys ...string) {
for _, v := range keys {
if strings.Contains(testOptions.skipCases, v) {
tb.Skipf("Skipping test as %s is in skip list %s", v, testOptions.skipCases)
}
}
}
func skipIfNumNodesLessThan(tb testing.TB, required int) {
if clusterInfo.numNodes < required {
tb.Skipf("Skipping test as it requires %d different Nodes but cluster only has %d", required, clusterInfo.numNodes)
}
}
func skipIfRunCoverage(tb testing.TB, reason string) {
if testOptions.enableCoverage {
tb.Skipf("Skipping test for the '%s' when run coverage: %s", tb.Name(), reason)
}
}
func skipIfNotIPv4Cluster(tb testing.TB) {
if clusterInfo.podV4NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv4 addresses but the IPv4 network CIDR is not set")
}
}
func skipIfIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR != "" {
tb.Skipf("Skipping test as it is not supported in IPv6 cluster")
}
}
func skipIfNotIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv6 addresses but the IPv6 network CIDR is not set")
}
}
func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) {
for _, module := range requiredModules {
// modprobe with "--dry-run" does not require root privileges
cmd := fmt.Sprintf("modprobe --dry-run %s", module)
rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
if err != nil {
tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err)
}
if rc != 0 {
tb.Skipf("Skipping test as modprobe exited with an error when trying to confirm the presence of module '%s' - stdout: %s - stderr: %s", module, stdout, stderr)
}
}
tb.Logf("The following modules have been found on Node '%s': %v", nodeName, requiredModules)
}
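// Illustrative only: a hypothetical call to the helper above; the module name is an
// assumption and depends on what the test actually requires on that Node.
//
//   skipIfMissingKernelModule(t, nodeName, []string{"openvswitch"})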
func skipIfEncapModeIsNot(tb testing.TB, data *TestData, encapMode config.TrafficEncapModeType) {
currentEncapMode, err := data.GetEncapMode()
if err != nil {
tb.Fatalf("Failed to get encap mode: %v", err)
}
if currentEncapMode != encapMode {
tb.Skipf("Skipping test for encap mode '%s', test requires '%s'", currentEncapMode.String(), encapMode.String())
}
}
func skipIfHasWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) != 0 {
tb.Skipf("Skipping test as the cluster has Windows Nodes")
}
}
func skipIfNoWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) == 0 {
tb.Skipf("Skipping test as the cluster has no Windows Nodes")
}
}
func skipIfFeatureDisabled(tb testing.TB, feature featuregate.Feature, checkAgent bool, checkController bool) {
if checkAgent {
if featureGate, err := GetAgentFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Agent: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Agent", feature)
}
}
if checkController {
if featureGate, err := GetControllerFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Controller: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Controller", feature)
}
}
}
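// Illustrative only: a hypothetical test opening that combines the skip helpers
// above (the test name and the feature gate constant are assumptions, not taken
// from this file).
//
//   func TestFlowAggregator(t *testing.T) {
//       skipIfNotIPv4Cluster(t)
//       skipIfFeatureDisabled(t, features.FlowExporter, true, false)
//       ...
//   }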
func ensureAntreaRunning(data *TestData) error {
log.Println("Applying Antrea YAML")
if err := data.deployAntrea(deployAntreaDefault); err != nil {
return err
}
log.Println("Waiting for all Antrea DaemonSet Pods")
if err := data.waitForAntreaDaemonSetPods(defaultTimeout); err != nil {
return err
}
log.Println("Checking CoreDNS deployment")
if err := data.checkCoreDNSPods(defaultTimeout); err != nil {
return err
}
return nil
}
func createDirectory(path string) error {
return os.Mkdir(path, 0700)
}
func (data *TestData) setupLogDirectoryForTest(testName string) error {
path := filepath.Join(testOptions.logsExportDir, testName)
// Remove the directory if it already exists. This ensures that we start with an
// empty directory.
_ = os.RemoveAll(path)
err := createDirectory(path)
if err != nil {
return err
}
data.logsDirForTestCase = path
return nil
}
func setupTest(tb testing.TB) (*TestData, error) {
if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil {
tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err)
return nil, err
}
success := false
defer func() {
if !success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind clusters, there are issues with DNS name resolution on worker Nodes.
// Please note that CoreDNS services are forced onto the control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil {
return testData, v4Enabled, v6Enabled, err
}
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
}
data.forAllMatchingPodsInNamespace("k8s-app=kube-proxy", kubeNamespace, writePodLogs)
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, writePodLogs)
// dump the logs for monitoring Pods to disk.
data.forAllMatchingPodsInNamespace("", monitoringNamespace, writePodLogs)
// dump the logs for flow-aggregator Pods to disk.
data.forAllMatchingPodsInNamespace("", flowAggregatorNamespace, writePodLogs)
// dump the output of "kubectl describe" for Antrea pods to disk.
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "describe")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s describe pod %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
})
if !writeNodeLogs {
return
}
// getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file
// cannot be created. File must be closed by the caller.
getNodeWriter := func(nodeName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s", nodeName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// export kubelet logs with journalctl for each Node. If the Nodes do not use journalctl we
// print a log message. If kubelet is not run with systemd, the log file will be empty.
if err := forAllNodes(func(nodeName string) error {
const numLines = 100
// --no-pager ensures the command does not hang.
cmd := fmt.Sprintf("journalctl -u kubelet -n %d --no-pager", numLines)
if clusterInfo.nodesOS[nodeName] == "windows" {
cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet"
}
rc, stdout, _, err := RunCommandOnNode(nodeName, cmd)
if err != nil || rc != 0 {
// return an error and skip subsequent Nodes
return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err)
}
w := getNodeWriter(nodeName, "kubelet")
if w == nil {
// move on to the next Node
return nil
}
defer w.Close()
w.WriteString(stdout)
return nil
}); err != nil {
tb.Logf("Error when exporting kubelet logs: %v", err)
}
}
func teardownFlowAggregator(tb testing.TB, data *TestData) {
if testOptions.enableCoverage {
if err := testData.gracefulExitFlowAggregator(testOptions.coverageDir); err != nil {
tb.Fatalf("Error when gracefully exiting Flow Aggregator: %v", err)
}
}
tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace)
if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil {
tb.Logf("Error when tearing down flow aggregator: %v", err)
}
}
func | (tb testing.TB, data *TestData) {
exportLogs(tb, data, "beforeTeardown", true)
if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty {
_ = os.Remove(data.logsDirForTestCase)
}
tb.Logf("Deleting '%s' K8s Namespace", testNamespace)
if err := data.deleteTestNamespace(defaultTimeout); err != nil {
tb.Logf("Error when tearing down test: %v", err)
}
}
func deletePodWrapper(tb testing.TB, data *TestData, namespace, name string) {
tb.Logf("Deleting Pod '%s'", name)
if err := data.deletePod(namespace, name); err != nil {
tb.Logf("Error when deleting Pod: %v", err)
}
}
// createTestBusyboxPods creates the desired number of busybox Pods and waits for their IP addresses to
// become available. This is a common pattern in our tests, so having this helper function makes
// sense. It calls Fatalf in case of error, so it must be called from the goroutine running the test
// or benchmark function. You can create all the Pods on the same Node by setting nodeName. If
// nodeName is the empty string, each Pod will be created on an arbitrary
// Node. createTestBusyboxPods returns the cleanupFn function which can be used to delete the
// created Pods. Pods are created in parallel to reduce the time required to run the tests.
func createTestBusyboxPods(tb testing.TB, data *TestData, num int, ns string, nodeName string) (
podNames []string, podIPs []*PodIPs, cleanupFn func(),
) {
cleanupFn = func() {
var wg sync.WaitGroup
for _, podName := range podNames {
wg.Add(1)
go func(name string) {
deletePodWrapper(tb, data, ns, name)
wg.Done()
}(podName)
}
wg.Wait()
}
type podData struct {
podName string
podIP *PodIPs
err error
}
createPodAndGetIP := func() (string, *PodIPs, error) {
podName := randName("test-pod-")
tb.Logf("Creating a busybox test Pod '%s' and waiting for IP", podName)
if err := data.createBusyboxPodOnNode(podName, ns, nodeName, false); err != nil {
tb.Errorf("Error when creating busybox test Pod '%s': %v", podName, err)
return "", nil, err
}
podIP, err := data.podWaitForIPs(defaultTimeout, podName, ns)
if err != nil {
tb.Errorf("Error when waiting for IP for Pod '%s': %v", podName, err)
return podName, nil, err
}
return podName, podIP, nil
}
podsCh := make(chan podData, num)
for i := 0; i < num; i++ {
go func() {
podName, podIP, err := createPodAndGetIP()
podsCh <- podData{podName, podIP, err}
}()
}
errCnt := 0
for i := 0; i < num; i++ {
pod := <-podsCh
if pod.podName != "" {
podNames = append(podNames, pod.podName)
podIPs = append(podIPs, pod.podIP)
}
if pod.err != nil {
errCnt++
}
}
if errCnt > 0 {
defer cleanupFn()
tb.Fatalf("%d / %d Pods could not be created successfully", errCnt, num)
}
return podNames, podIPs, cleanupFn
}
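// Illustrative sketch, not part of the original file: consuming createTestBusyboxPods.
// The Pod count is arbitrary; an empty nodeName lets the scheduler pick the Nodes.
func exampleUsageOfCreateTestBusyboxPods(t *testing.T, data *TestData) {
    podNames, podIPs, cleanupFn := createTestBusyboxPods(t, data, 2, testNamespace, "")
    defer cleanupFn()
    for i, name := range podNames {
        t.Logf("Pod %s has IPs %v", name, podIPs[i])
    }
}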
| teardownTest | identifier_name |
fixtures.go | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"log"
"net"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"k8s.io/component-base/featuregate"
"antrea.io/antrea/pkg/agent/config"
)
func skipIfNotBenchmarkTest(tb testing.TB) {
if !testOptions.withBench {
tb.Skipf("Skipping benchmark test: %s", tb.Name())
}
}
func skipIfNotAntreaIPAMTest(tb testing.TB) {
if !testOptions.enableAntreaIPAM {
tb.Skipf("Skipping AntreaIPAM test: %s", tb.Name())
}
}
func skipIfAntreaIPAMTest(tb testing.TB) {
if testOptions.enableAntreaIPAM {
tb.Skipf("Skipping test when running AntreaIPAM: %s", tb.Name())
}
}
func skipIfProviderIs(tb testing.TB, name string, reason string) {
if testOptions.providerName == name {
tb.Skipf("Skipping test for the '%s' provider: %s", name, reason)
}
}
func skipIfNotRequired(tb testing.TB, keys ...string) {
for _, v := range keys {
if strings.Contains(testOptions.skipCases, v) {
tb.Skipf("Skipping test as %s is in skip list %s", v, testOptions.skipCases)
}
}
}
func skipIfNumNodesLessThan(tb testing.TB, required int) {
if clusterInfo.numNodes < required {
tb.Skipf("Skipping test as it requires %d different Nodes but cluster only has %d", required, clusterInfo.numNodes)
}
}
func skipIfRunCoverage(tb testing.TB, reason string) {
if testOptions.enableCoverage {
tb.Skipf("Skipping test for the '%s' when run coverage: %s", tb.Name(), reason)
}
}
func skipIfNotIPv4Cluster(tb testing.TB) {
if clusterInfo.podV4NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv4 addresses but the IPv4 network CIDR is not set")
}
}
func skipIfIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR != "" {
tb.Skipf("Skipping test as it is not supported in IPv6 cluster")
}
}
func skipIfNotIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv6 addresses but the IPv6 network CIDR is not set")
}
}
func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) {
for _, module := range requiredModules {
// modprobe with "--dry-run" does not require root privileges
cmd := fmt.Sprintf("modprobe --dry-run %s", module)
rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
if err != nil {
tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err)
}
if rc != 0 {
tb.Skipf("Skipping test as modprobe exited with an error when trying to confirm the presence of module '%s' - stdout: %s - stderr: %s", module, stdout, stderr)
}
}
tb.Logf("The following modules have been found on Node '%s': %v", nodeName, requiredModules)
}
func skipIfEncapModeIsNot(tb testing.TB, data *TestData, encapMode config.TrafficEncapModeType) {
currentEncapMode, err := data.GetEncapMode()
if err != nil {
tb.Fatalf("Failed to get encap mode: %v", err)
}
if currentEncapMode != encapMode {
tb.Skipf("Skipping test for encap mode '%s', test requires '%s'", currentEncapMode.String(), encapMode.String())
}
}
func skipIfHasWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) != 0 {
tb.Skipf("Skipping test as the cluster has Windows Nodes")
}
}
func skipIfNoWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) == 0 {
tb.Skipf("Skipping test as the cluster has no Windows Nodes")
}
}
func skipIfFeatureDisabled(tb testing.TB, feature featuregate.Feature, checkAgent bool, checkController bool) {
if checkAgent {
if featureGate, err := GetAgentFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Agent: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Agent", feature)
}
}
if checkController {
if featureGate, err := GetControllerFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Controller: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Controller", feature)
}
}
}
func ensureAntreaRunning(data *TestData) error {
log.Println("Applying Antrea YAML")
if err := data.deployAntrea(deployAntreaDefault); err != nil {
return err
}
log.Println("Waiting for all Antrea DaemonSet Pods")
if err := data.waitForAntreaDaemonSetPods(defaultTimeout); err != nil {
return err
}
log.Println("Checking CoreDNS deployment")
if err := data.checkCoreDNSPods(defaultTimeout); err != nil {
return err
}
return nil
}
func createDirectory(path string) error {
return os.Mkdir(path, 0700)
}
func (data *TestData) setupLogDirectoryForTest(testName string) error {
path := filepath.Join(testOptions.logsExportDir, testName)
// remove directory if it already exists. This ensures that we start with an empty
// directory
_ = os.RemoveAll(path)
err := createDirectory(path)
if err != nil {
return err
}
data.logsDirForTestCase = path
return nil
}
func setupTest(tb testing.TB) (*TestData, error) {
if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil {
tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err)
return nil, err
}
success := false
defer func() {
if !success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
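// Illustrative sketch, not part of the original file: the usual pairing of setupTest with
// a deferred teardownTest in a test function. Any real test logic would go where the
// placeholder comment is.
func exampleUsageOfSetupTest(t *testing.T) {
    data, err := setupTest(t)
    if err != nil {
        t.Fatalf("Error when setting up test: %v", err)
    }
    // teardownTest (defined below) exports logs on failure and deletes the test Namespace.
    defer teardownTest(t, data)
    // ... test body using data ...
}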
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind clusters, there are issues with DNS name resolution on worker Nodes.
// Please note that CoreDNS services are forced onto the control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil {
return testData, v4Enabled, v6Enabled, err
}
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
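// Illustrative sketch, not part of the original file: a flow-visibility style test built on
// setupTestWithIPFIXCollector, with the matching teardown helpers defined later in this file.
func exampleUsageOfSetupTestWithIPFIXCollector(t *testing.T) {
    data, v4Enabled, v6Enabled, err := setupTestWithIPFIXCollector(t)
    if err != nil {
        t.Fatalf("Error when setting up test with the IPFIX collector: %v", err)
    }
    defer teardownTest(t, data)
    defer teardownFlowAggregator(t, data)
    t.Logf("IPv4 enabled: %t, IPv6 enabled: %t", v4Enabled, v6Enabled)
}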
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
}
data.forAllMatchingPodsInNamespace("k8s-app=kube-proxy", kubeNamespace, writePodLogs)
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, writePodLogs)
// dump the logs for monitoring Pods to disk.
data.forAllMatchingPodsInNamespace("", monitoringNamespace, writePodLogs)
// dump the logs for flow-aggregator Pods to disk.
data.forAllMatchingPodsInNamespace("", flowAggregatorNamespace, writePodLogs)
// dump the output of "kubectl describe" for Antrea pods to disk.
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "describe")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s describe pod %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
})
if !writeNodeLogs {
return
}
// getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file
// cannot be created. File must be closed by the caller.
getNodeWriter := func(nodeName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s", nodeName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// export kubelet logs with journalctl for each Node. If the Nodes do not use journalctl we
// print a log message. If kubelet is not run with systemd, the log file will be empty.
if err := forAllNodes(func(nodeName string) error {
const numLines = 100
// --no-pager ensures the command does not hang.
cmd := fmt.Sprintf("journalctl -u kubelet -n %d --no-pager", numLines)
if clusterInfo.nodesOS[nodeName] == "windows" {
cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet"
}
rc, stdout, _, err := RunCommandOnNode(nodeName, cmd)
if err != nil || rc != 0 {
// return an error and skip subsequent Nodes
return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err)
}
w := getNodeWriter(nodeName, "kubelet")
if w == nil {
// move on to the next Node
return nil
}
defer w.Close()
w.WriteString(stdout)
return nil
}); err != nil {
tb.Logf("Error when exporting kubelet logs: %v", err)
}
}
func teardownFlowAggregator(tb testing.TB, data *TestData) {
if testOptions.enableCoverage {
if err := testData.gracefulExitFlowAggregator(testOptions.coverageDir); err != nil {
tb.Fatalf("Error when gracefully exiting Flow Aggregator: %v", err)
}
}
tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace)
if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil {
tb.Logf("Error when tearing down flow aggregator: %v", err)
}
}
func teardownTest(tb testing.TB, data *TestData) {
exportLogs(tb, data, "beforeTeardown", true)
if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty {
_ = os.Remove(data.logsDirForTestCase)
}
tb.Logf("Deleting '%s' K8s Namespace", testNamespace)
if err := data.deleteTestNamespace(defaultTimeout); err != nil {
tb.Logf("Error when tearing down test: %v", err)
}
}
| func deletePodWrapper(tb testing.TB, data *TestData, namespace, name string) {
tb.Logf("Deleting Pod '%s'", name)
if err := data.deletePod(namespace, name); err != nil {
tb.Logf("Error when deleting Pod: %v", err)
}
}
// createTestBusyboxPods creates the desired number of busybox Pods and waits for their IP addresses to
// become available. This is a common pattern in our tests, so having this helper function makes
// sense. It calls Fatalf in case of error, so it must be called from the goroutine running the test
// or benchmark function. You can create all the Pods on the same Node by setting nodeName. If
// nodeName is the empty string, each Pod will be created on an arbitrary
// Node. createTestBusyboxPods returns the cleanupFn function which can be used to delete the
// created Pods. Pods are created in parallel to reduce the time required to run the tests.
func createTestBusyboxPods(tb testing.TB, data *TestData, num int, ns string, nodeName string) (
podNames []string, podIPs []*PodIPs, cleanupFn func(),
) {
cleanupFn = func() {
var wg sync.WaitGroup
for _, podName := range podNames {
wg.Add(1)
go func(name string) {
deletePodWrapper(tb, data, ns, name)
wg.Done()
}(podName)
}
wg.Wait()
}
type podData struct {
podName string
podIP *PodIPs
err error
}
createPodAndGetIP := func() (string, *PodIPs, error) {
podName := randName("test-pod-")
tb.Logf("Creating a busybox test Pod '%s' and waiting for IP", podName)
if err := data.createBusyboxPodOnNode(podName, ns, nodeName, false); err != nil {
tb.Errorf("Error when creating busybox test Pod '%s': %v", podName, err)
return "", nil, err
}
podIP, err := data.podWaitForIPs(defaultTimeout, podName, ns)
if err != nil {
tb.Errorf("Error when waiting for IP for Pod '%s': %v", podName, err)
return podName, nil, err
}
return podName, podIP, nil
}
podsCh := make(chan podData, num)
for i := 0; i < num; i++ {
go func() {
podName, podIP, err := createPodAndGetIP()
podsCh <- podData{podName, podIP, err}
}()
}
errCnt := 0
for i := 0; i < num; i++ {
pod := <-podsCh
if pod.podName != "" {
podNames = append(podNames, pod.podName)
podIPs = append(podIPs, pod.podIP)
}
if pod.err != nil {
errCnt++
}
}
if errCnt > 0 {
defer cleanupFn()
tb.Fatalf("%d / %d Pods could not be created successfully", errCnt, num)
}
return podNames, podIPs, cleanupFn
} | random_line_split |
|
fixtures.go | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"log"
"net"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"k8s.io/component-base/featuregate"
"antrea.io/antrea/pkg/agent/config"
)
func skipIfNotBenchmarkTest(tb testing.TB) {
if !testOptions.withBench {
tb.Skipf("Skipping benchmark test: %s", tb.Name())
}
}
func skipIfNotAntreaIPAMTest(tb testing.TB) {
if !testOptions.enableAntreaIPAM {
tb.Skipf("Skipping AntreaIPAM test: %s", tb.Name())
}
}
func skipIfAntreaIPAMTest(tb testing.TB) {
if testOptions.enableAntreaIPAM {
tb.Skipf("Skipping test when running AntreaIPAM: %s", tb.Name())
}
}
func skipIfProviderIs(tb testing.TB, name string, reason string) {
if testOptions.providerName == name {
tb.Skipf("Skipping test for the '%s' provider: %s", name, reason)
}
}
func skipIfNotRequired(tb testing.TB, keys ...string) {
for _, v := range keys {
if strings.Contains(testOptions.skipCases, v) {
tb.Skipf("Skipping test as %s is in skip list %s", v, testOptions.skipCases)
}
}
}
func skipIfNumNodesLessThan(tb testing.TB, required int) {
if clusterInfo.numNodes < required {
tb.Skipf("Skipping test as it requires %d different Nodes but cluster only has %d", required, clusterInfo.numNodes)
}
}
func skipIfRunCoverage(tb testing.TB, reason string) {
if testOptions.enableCoverage {
tb.Skipf("Skipping test for the '%s' when run coverage: %s", tb.Name(), reason)
}
}
func skipIfNotIPv4Cluster(tb testing.TB) {
if clusterInfo.podV4NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv4 addresses but the IPv4 network CIDR is not set")
}
}
func skipIfIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR != "" {
tb.Skipf("Skipping test as it is not supported in IPv6 cluster")
}
}
func skipIfNotIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv6 addresses but the IPv6 network CIDR is not set")
}
}
func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) {
for _, module := range requiredModules {
// modprobe with "--dry-run" does not require root privileges
cmd := fmt.Sprintf("modprobe --dry-run %s", module)
rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
if err != nil {
tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err)
}
if rc != 0 {
tb.Skipf("Skipping test as modprobe exited with an error when trying to confirm the presence of module '%s' - stdout: %s - stderr: %s", module, stdout, stderr)
}
}
tb.Logf("The following modules have been found on Node '%s': %v", nodeName, requiredModules)
}
func skipIfEncapModeIsNot(tb testing.TB, data *TestData, encapMode config.TrafficEncapModeType) {
currentEncapMode, err := data.GetEncapMode()
if err != nil {
tb.Fatalf("Failed to get encap mode: %v", err)
}
if currentEncapMode != encapMode {
tb.Skipf("Skipping test for encap mode '%s', test requires '%s'", currentEncapMode.String(), encapMode.String())
}
}
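// Illustrative sketch, not part of the original file: restricting a test to a given traffic
// encapsulation mode. TrafficEncapModeEncap is assumed to be the usual constant exposed by
// the imported agent config package; substitute the appropriate one if it differs.
func exampleUsageOfSkipIfEncapModeIsNot(t *testing.T, data *TestData) {
    skipIfEncapModeIsNot(t, data, config.TrafficEncapModeEncap)
    // ... test body that only makes sense with encapsulation ...
}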
func skipIfHasWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) != 0 {
tb.Skipf("Skipping test as the cluster has Windows Nodes")
}
}
func skipIfNoWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) == 0 {
tb.Skipf("Skipping test as the cluster has no Windows Nodes")
}
}
func skipIfFeatureDisabled(tb testing.TB, feature featuregate.Feature, checkAgent bool, checkController bool) {
if checkAgent {
if featureGate, err := GetAgentFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Agent: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Agent", feature)
}
}
if checkController {
if featureGate, err := GetControllerFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Controller: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Controller", feature)
}
}
}
func ensureAntreaRunning(data *TestData) error {
log.Println("Applying Antrea YAML")
if err := data.deployAntrea(deployAntreaDefault); err != nil {
return err
}
log.Println("Waiting for all Antrea DaemonSet Pods")
if err := data.waitForAntreaDaemonSetPods(defaultTimeout); err != nil {
return err
}
log.Println("Checking CoreDNS deployment")
if err := data.checkCoreDNSPods(defaultTimeout); err != nil {
return err
}
return nil
}
func createDirectory(path string) error {
return os.Mkdir(path, 0700)
}
func (data *TestData) setupLogDirectoryForTest(testName string) error {
path := filepath.Join(testOptions.logsExportDir, testName)
// remove directory if it already exists. This ensures that we start with an empty
// directory
_ = os.RemoveAll(path)
err := createDirectory(path)
if err != nil {
return err
}
data.logsDirForTestCase = path
return nil
}
func setupTest(tb testing.TB) (*TestData, error) {
if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil {
tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err)
return nil, err
}
success := false
defer func() {
if !success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind clusters, there are issues with DNS name resolution on worker Nodes.
// Please note that CoreDNS services are forced onto the control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil |
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
}
data.forAllMatchingPodsInNamespace("k8s-app=kube-proxy", kubeNamespace, writePodLogs)
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, writePodLogs)
// dump the logs for monitoring Pods to disk.
data.forAllMatchingPodsInNamespace("", monitoringNamespace, writePodLogs)
// dump the logs for flow-aggregator Pods to disk.
data.forAllMatchingPodsInNamespace("", flowAggregatorNamespace, writePodLogs)
// dump the output of "kubectl describe" for Antrea pods to disk.
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "describe")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s describe pod %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
})
if !writeNodeLogs {
return
}
// getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file
// cannot be created. File must be closed by the caller.
getNodeWriter := func(nodeName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s", nodeName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// export kubelet logs with journalctl for each Node. If the Nodes do not use journalctl we
// print a log message. If kubelet is not run with systemd, the log file will be empty.
if err := forAllNodes(func(nodeName string) error {
const numLines = 100
// --no-pager ensures the command does not hang.
cmd := fmt.Sprintf("journalctl -u kubelet -n %d --no-pager", numLines)
if clusterInfo.nodesOS[nodeName] == "windows" {
cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet"
}
rc, stdout, _, err := RunCommandOnNode(nodeName, cmd)
if err != nil || rc != 0 {
// return an error and skip subsequent Nodes
return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err)
}
w := getNodeWriter(nodeName, "kubelet")
if w == nil {
// move on to the next Node
return nil
}
defer w.Close()
w.WriteString(stdout)
return nil
}); err != nil {
tb.Logf("Error when exporting kubelet logs: %v", err)
}
}
func teardownFlowAggregator(tb testing.TB, data *TestData) {
if testOptions.enableCoverage {
if err := testData.gracefulExitFlowAggregator(testOptions.coverageDir); err != nil {
tb.Fatalf("Error when gracefully exiting Flow Aggregator: %v", err)
}
}
tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace)
if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil {
tb.Logf("Error when tearing down flow aggregator: %v", err)
}
}
func teardownTest(tb testing.TB, data *TestData) {
exportLogs(tb, data, "beforeTeardown", true)
if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty {
_ = os.Remove(data.logsDirForTestCase)
}
tb.Logf("Deleting '%s' K8s Namespace", testNamespace)
if err := data.deleteTestNamespace(defaultTimeout); err != nil {
tb.Logf("Error when tearing down test: %v", err)
}
}
func deletePodWrapper(tb testing.TB, data *TestData, namespace, name string) {
tb.Logf("Deleting Pod '%s'", name)
if err := data.deletePod(namespace, name); err != nil {
tb.Logf("Error when deleting Pod: %v", err)
}
}
// createTestBusyboxPods creates the desired number of busybox Pods and waits for their IP addresses to
// become available. This is a common pattern in our tests, so having this helper function makes
// sense. It calls Fatalf in case of error, so it must be called from the goroutine running the test
// or benchmark function. You can create all the Pods on the same Node by setting nodeName. If
// nodeName is the empty string, each Pod will be created on an arbitrary
// Node. createTestBusyboxPods returns the cleanupFn function which can be used to delete the
// created Pods. Pods are created in parallel to reduce the time required to run the tests.
func createTestBusyboxPods(tb testing.TB, data *TestData, num int, ns string, nodeName string) (
podNames []string, podIPs []*PodIPs, cleanupFn func(),
) {
cleanupFn = func() {
var wg sync.WaitGroup
for _, podName := range podNames {
wg.Add(1)
go func(name string) {
deletePodWrapper(tb, data, ns, name)
wg.Done()
}(podName)
}
wg.Wait()
}
type podData struct {
podName string
podIP *PodIPs
err error
}
createPodAndGetIP := func() (string, *PodIPs, error) {
podName := randName("test-pod-")
tb.Logf("Creating a busybox test Pod '%s' and waiting for IP", podName)
if err := data.createBusyboxPodOnNode(podName, ns, nodeName, false); err != nil {
tb.Errorf("Error when creating busybox test Pod '%s': %v", podName, err)
return "", nil, err
}
podIP, err := data.podWaitForIPs(defaultTimeout, podName, ns)
if err != nil {
tb.Errorf("Error when waiting for IP for Pod '%s': %v", podName, err)
return podName, nil, err
}
return podName, podIP, nil
}
podsCh := make(chan podData, num)
for i := 0; i < num; i++ {
go func() {
podName, podIP, err := createPodAndGetIP()
podsCh <- podData{podName, podIP, err}
}()
}
errCnt := 0
for i := 0; i < num; i++ {
pod := <-podsCh
if pod.podName != "" {
podNames = append(podNames, pod.podName)
podIPs = append(podIPs, pod.podIP)
}
if pod.err != nil {
errCnt++
}
}
if errCnt > 0 {
defer cleanupFn()
tb.Fatalf("%d / %d Pods could not be created successfully", errCnt, num)
}
return podNames, podIPs, cleanupFn
}
| {
return testData, v4Enabled, v6Enabled, err
} | conditional_block |
fixtures.go | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"log"
"net"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"k8s.io/component-base/featuregate"
"antrea.io/antrea/pkg/agent/config"
)
func skipIfNotBenchmarkTest(tb testing.TB) {
if !testOptions.withBench {
tb.Skipf("Skipping benchmark test: %s", tb.Name())
}
}
func skipIfNotAntreaIPAMTest(tb testing.TB) {
if !testOptions.enableAntreaIPAM {
tb.Skipf("Skipping AntreaIPAM test: %s", tb.Name())
}
}
func skipIfAntreaIPAMTest(tb testing.TB) {
if testOptions.enableAntreaIPAM {
tb.Skipf("Skipping test when running AntreaIPAM: %s", tb.Name())
}
}
func skipIfProviderIs(tb testing.TB, name string, reason string) {
if testOptions.providerName == name {
tb.Skipf("Skipping test for the '%s' provider: %s", name, reason)
}
}
func skipIfNotRequired(tb testing.TB, keys ...string) {
for _, v := range keys {
if strings.Contains(testOptions.skipCases, v) {
tb.Skipf("Skipping test as %s is in skip list %s", v, testOptions.skipCases)
}
}
}
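// Illustrative sketch, not part of the original file: opting a test out via the skip list.
// The key "mode-irrelevant" is a placeholder; real keys are whatever values are passed to
// the suite through testOptions.skipCases.
func exampleUsageOfSkipIfNotRequired(t *testing.T) {
    skipIfNotRequired(t, "mode-irrelevant")
    // ... test body ...
}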
func skipIfNumNodesLessThan(tb testing.TB, required int) {
if clusterInfo.numNodes < required {
tb.Skipf("Skipping test as it requires %d different Nodes but cluster only has %d", required, clusterInfo.numNodes)
}
}
func skipIfRunCoverage(tb testing.TB, reason string) {
if testOptions.enableCoverage {
tb.Skipf("Skipping test for the '%s' when run coverage: %s", tb.Name(), reason)
}
}
func skipIfNotIPv4Cluster(tb testing.TB) {
if clusterInfo.podV4NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv4 addresses but the IPv4 network CIDR is not set")
}
}
func skipIfIPv6Cluster(tb testing.TB) |
func skipIfNotIPv6Cluster(tb testing.TB) {
if clusterInfo.podV6NetworkCIDR == "" {
tb.Skipf("Skipping test as it requires IPv6 addresses but the IPv6 network CIDR is not set")
}
}
func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) {
for _, module := range requiredModules {
// modprobe with "--dry-run" does not require root privileges
cmd := fmt.Sprintf("modprobe --dry-run %s", module)
rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd)
if err != nil {
tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err)
}
if rc != 0 {
tb.Skipf("Skipping test as modprobe exited with an error when trying to confirm the presence of module '%s' - stdout: %s - stderr: %s", module, stdout, stderr)
}
}
tb.Logf("The following modules have been found on Node '%s': %v", nodeName, requiredModules)
}
func skipIfEncapModeIsNot(tb testing.TB, data *TestData, encapMode config.TrafficEncapModeType) {
currentEncapMode, err := data.GetEncapMode()
if err != nil {
tb.Fatalf("Failed to get encap mode: %v", err)
}
if currentEncapMode != encapMode {
tb.Skipf("Skipping test for encap mode '%s', test requires '%s'", currentEncapMode.String(), encapMode.String())
}
}
func skipIfHasWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) != 0 {
tb.Skipf("Skipping test as the cluster has Windows Nodes")
}
}
func skipIfNoWindowsNodes(tb testing.TB) {
if len(clusterInfo.windowsNodes) == 0 {
tb.Skipf("Skipping test as the cluster has no Windows Nodes")
}
}
func skipIfFeatureDisabled(tb testing.TB, feature featuregate.Feature, checkAgent bool, checkController bool) {
if checkAgent {
if featureGate, err := GetAgentFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Agent: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Agent", feature)
}
}
if checkController {
if featureGate, err := GetControllerFeatures(); err != nil {
tb.Fatalf("Cannot determine if %s is enabled in the Controller: %v", feature, err)
} else if !featureGate.Enabled(feature) {
tb.Skipf("Skipping test because %s is not enabled in the Controller", feature)
}
}
}
func ensureAntreaRunning(data *TestData) error {
log.Println("Applying Antrea YAML")
if err := data.deployAntrea(deployAntreaDefault); err != nil {
return err
}
log.Println("Waiting for all Antrea DaemonSet Pods")
if err := data.waitForAntreaDaemonSetPods(defaultTimeout); err != nil {
return err
}
log.Println("Checking CoreDNS deployment")
if err := data.checkCoreDNSPods(defaultTimeout); err != nil {
return err
}
return nil
}
func createDirectory(path string) error {
return os.Mkdir(path, 0700)
}
func (data *TestData) setupLogDirectoryForTest(testName string) error {
path := filepath.Join(testOptions.logsExportDir, testName)
// remove directory if it already exists. This ensures that we start with an empty
// directory
_ = os.RemoveAll(path)
err := createDirectory(path)
if err != nil {
return err
}
data.logsDirForTestCase = path
return nil
}
func setupTest(tb testing.TB) (*TestData, error) {
if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil {
tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err)
return nil, err
}
success := false
defer func() {
if !success {
tb.Fail()
exportLogs(tb, testData, "afterSetupTest", true)
}
}()
tb.Logf("Creating '%s' K8s Namespace", testNamespace)
if err := ensureAntreaRunning(testData); err != nil {
return nil, err
}
if err := testData.createTestNamespace(); err != nil {
return nil, err
}
success = true
return testData, nil
}
func setupTestWithIPFIXCollector(tb testing.TB) (*TestData, bool, bool, error) {
v4Enabled := clusterInfo.podV4NetworkCIDR != ""
v6Enabled := clusterInfo.podV6NetworkCIDR != ""
testData, err := setupTest(tb)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
// Create pod using ipfix collector image
if err = testData.createPodOnNode("ipfix-collector", testNamespace, "", ipfixCollectorImage, nil, nil, nil, nil, true, nil); err != nil {
tb.Errorf("Error when creating the ipfix collector Pod: %v", err)
}
ipfixCollectorIP, err := testData.podWaitForIPs(defaultTimeout, "ipfix-collector", testNamespace)
if err != nil || len(ipfixCollectorIP.ipStrings) == 0 {
tb.Errorf("Error when waiting to get ipfix collector Pod IP: %v", err)
return nil, v4Enabled, v6Enabled, err
}
var ipStr string
if v6Enabled && ipfixCollectorIP.ipv6 != nil {
ipStr = ipfixCollectorIP.ipv6.String()
} else {
ipStr = ipfixCollectorIP.ipv4.String()
}
ipfixCollectorAddr := fmt.Sprintf("%s:tcp", net.JoinHostPort(ipStr, ipfixCollectorPort))
faClusterIPAddr := ""
tb.Logf("Applying flow aggregator YAML with ipfix collector address: %s", ipfixCollectorAddr)
faClusterIP, err := testData.deployFlowAggregator(ipfixCollectorAddr)
if err != nil {
return testData, v4Enabled, v6Enabled, err
}
if testOptions.providerName == "kind" {
// In Kind clusters, there are issues with DNS name resolution on worker Nodes.
// Please note that CoreDNS services are forced onto the control-plane Node.
faClusterIPAddr = fmt.Sprintf("%s:%s:tls", faClusterIP, ipfixCollectorPort)
}
tb.Logf("Deploying flow exporter with collector address: %s", faClusterIPAddr)
if err = testData.deployAntreaFlowExporter(faClusterIPAddr); err != nil {
return testData, v4Enabled, v6Enabled, err
}
tb.Logf("Checking CoreDNS deployment")
if err = testData.checkCoreDNSPods(defaultTimeout); err != nil {
return testData, v4Enabled, v6Enabled, err
}
return testData, v4Enabled, v6Enabled, nil
}
func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs bool) {
if tb.Skipped() {
return
}
// if test was successful and --logs-export-on-success was not provided, we do not export
// any logs.
if !tb.Failed() && !testOptions.logsExportOnSuccess {
return
}
const timeFormat = "Jan02-15-04-05"
timeStamp := time.Now().Format(timeFormat)
logsDir := filepath.Join(data.logsDirForTestCase, fmt.Sprintf("%s.%s", logsSubDir, timeStamp))
err := createDirectory(logsDir)
if err != nil {
tb.Errorf("Error when creating logs directory '%s': %v", logsDir, err)
return
}
tb.Logf("Exporting test logs to '%s'", logsDir)
// for now we just retrieve the logs for the Antrea Pods, but maybe we can find a good way to
// retrieve the logs for the test Pods in the future (before deleting them) if it is useful
// for debugging.
// getPodWriter creates the file with name nodeName-podName-suffix. It returns nil if the
// file cannot be created. File must be closed by the caller.
getPodWriter := func(nodeName, podName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s-%s", nodeName, podName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// runKubectl runs the provided kubectl command on the control-plane Node and returns the
// output. It returns an empty string in case of error.
runKubectl := func(cmd string) string {
rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd)
if err != nil || rc != 0 {
tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd)
return ""
}
return stdout
}
// dump the logs for Antrea Pods to disk.
writePodLogs := func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "logs")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s logs --all-containers %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
}
data.forAllMatchingPodsInNamespace("k8s-app=kube-proxy", kubeNamespace, writePodLogs)
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, writePodLogs)
// dump the logs for monitoring Pods to disk.
data.forAllMatchingPodsInNamespace("", monitoringNamespace, writePodLogs)
// dump the logs for flow-aggregator Pods to disk.
data.forAllMatchingPodsInNamespace("", flowAggregatorNamespace, writePodLogs)
// dump the output of "kubectl describe" for Antrea pods to disk.
data.forAllMatchingPodsInNamespace("app=antrea", antreaNamespace, func(nodeName, podName, nsName string) error {
w := getPodWriter(nodeName, podName, "describe")
if w == nil {
return nil
}
defer w.Close()
cmd := fmt.Sprintf("kubectl -n %s describe pod %s", nsName, podName)
stdout := runKubectl(cmd)
if stdout == "" {
return nil
}
w.WriteString(stdout)
return nil
})
if !writeNodeLogs {
return
}
// getNodeWriter creates the file with name nodeName-suffix. It returns nil if the file
// cannot be created. File must be closed by the caller.
getNodeWriter := func(nodeName, suffix string) *os.File {
logFile := filepath.Join(logsDir, fmt.Sprintf("%s-%s", nodeName, suffix))
f, err := os.Create(logFile)
if err != nil {
tb.Errorf("Error when creating log file '%s': '%v'", logFile, err)
return nil
}
return f
}
// export kubelet logs with journalctl for each Node. If the Nodes do not use journalctl we
// print a log message. If kubelet is not run with systemd, the log file will be empty.
if err := forAllNodes(func(nodeName string) error {
const numLines = 100
// --no-pager ensures the command does not hang.
cmd := fmt.Sprintf("journalctl -u kubelet -n %d --no-pager", numLines)
if clusterInfo.nodesOS[nodeName] == "windows" {
cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet"
}
rc, stdout, _, err := RunCommandOnNode(nodeName, cmd)
if err != nil || rc != 0 {
// return an error and skip subsequent Nodes
return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err)
}
w := getNodeWriter(nodeName, "kubelet")
if w == nil {
// move on to the next Node
return nil
}
defer w.Close()
w.WriteString(stdout)
return nil
}); err != nil {
tb.Logf("Error when exporting kubelet logs: %v", err)
}
}
func teardownFlowAggregator(tb testing.TB, data *TestData) {
if testOptions.enableCoverage {
if err := testData.gracefulExitFlowAggregator(testOptions.coverageDir); err != nil {
tb.Fatalf("Error when gracefully exiting Flow Aggregator: %v", err)
}
}
tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace)
if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil {
tb.Logf("Error when tearing down flow aggregator: %v", err)
}
}
func teardownTest(tb testing.TB, data *TestData) {
exportLogs(tb, data, "beforeTeardown", true)
if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty {
_ = os.Remove(data.logsDirForTestCase)
}
tb.Logf("Deleting '%s' K8s Namespace", testNamespace)
if err := data.deleteTestNamespace(defaultTimeout); err != nil {
tb.Logf("Error when tearing down test: %v", err)
}
}
func deletePodWrapper(tb testing.TB, data *TestData, namespace, name string) {
tb.Logf("Deleting Pod '%s'", name)
if err := data.deletePod(namespace, name); err != nil {
tb.Logf("Error when deleting Pod: %v", err)
}
}
// createTestBusyboxPods creates the desired number of busybox Pods and waits for their IP addresses to
// become available. This is a common pattern in our tests, so having this helper function makes
// sense. It calls Fatalf in case of error, so it must be called from the goroutine running the test
// or benchmark function. You can create all the Pods on the same Node by setting nodeName. If
// nodeName is the empty string, each Pod will be created on an arbitrary
// Node. createTestBusyboxPods returns the cleanupFn function which can be used to delete the
// created Pods. Pods are created in parallel to reduce the time required to run the tests.
func createTestBusyboxPods(tb testing.TB, data *TestData, num int, ns string, nodeName string) (
podNames []string, podIPs []*PodIPs, cleanupFn func(),
) {
cleanupFn = func() {
var wg sync.WaitGroup
for _, podName := range podNames {
wg.Add(1)
go func(name string) {
deletePodWrapper(tb, data, ns, name)
wg.Done()
}(podName)
}
wg.Wait()
}
type podData struct {
podName string
podIP *PodIPs
err error
}
createPodAndGetIP := func() (string, *PodIPs, error) {
podName := randName("test-pod-")
tb.Logf("Creating a busybox test Pod '%s' and waiting for IP", podName)
if err := data.createBusyboxPodOnNode(podName, ns, nodeName, false); err != nil {
tb.Errorf("Error when creating busybox test Pod '%s': %v", podName, err)
return "", nil, err
}
podIP, err := data.podWaitForIPs(defaultTimeout, podName, ns)
if err != nil {
tb.Errorf("Error when waiting for IP for Pod '%s': %v", podName, err)
return podName, nil, err
}
return podName, podIP, nil
}
podsCh := make(chan podData, num)
for i := 0; i < num; i++ {
go func() {
podName, podIP, err := createPodAndGetIP()
podsCh <- podData{podName, podIP, err}
}()
}
errCnt := 0
for i := 0; i < num; i++ {
pod := <-podsCh
if pod.podName != "" {
podNames = append(podNames, pod.podName)
podIPs = append(podIPs, pod.podIP)
}
if pod.err != nil {
errCnt++
}
}
if errCnt > 0 {
defer cleanupFn()
tb.Fatalf("%d / %d Pods could not be created successfully", errCnt, num)
}
return podNames, podIPs, cleanupFn
}
| {
if clusterInfo.podV6NetworkCIDR != "" {
tb.Skipf("Skipping test as it is not supported in IPv6 cluster")
}
} | identifier_body |
lineChart.component.ts | import 'style-loader!./lineChart.scss';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import { CoreService, CoreEvent } from 'app/core/services/core.service';
import { ViewComponent } from 'app/core/components/view/view.component';
import {Component, Input, OnInit, AfterViewInit, OnDestroy} from '@angular/core';
import * as ChartistLegend from 'chartist-plugin-legend';
import {UUID} from 'angular2-uuid';
import * as c3 from 'c3';
import { LineChartService, HandleDataFunc, LineChartData,LineChartMetadata, DataListItem } from './lineChart.service';
export interface ChartFormatter {
format (value, ratio, id);
}
export interface Analytics {
label:string;
min?:number;
max?:number;
avg?:number;
last?:number;
total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
  /** The first element is the name of the field (a string),
   * followed by the remaining elements, which are numbers.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public makeConfig(){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
            text: this.linechartData.meta.labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
            raw.value = raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
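  // fetchData converts the requested epoch range from milliseconds to seconds and delegates the query to LineChartService.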
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end) |
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
let avg = total !== "N/A" ? Number((total / col.length).toFixed(2)) : total;
//console.log("Total type: " + typeof col.length)
let myResult:Analytics = {
label:label,
min: total !== "N/A" ? this.getMin(col) : total ,//.toFixed(2),
max: total !== "N/A" ? this.getMax(col) : total,//.toFixed(2),
avg: avg,
last: total !== "N/A" ? Number(col[col.length - 1].toFixed(2)) : total,
total: total !== "N/A" ? Number(total.toFixed(2)) : total
}
allColumns.push(myResult);
}
this.legendAnalytics.next(allColumns);
}
getMin(arr:any[]){
return Math.min(...arr);
}
getMax(arr:any[]){
return Math.max(...arr);
}
getAvg(arr:any[]){
return 1;
}
getLast(arr:any[]){
return 1
}
// LifeCycle Hooks
ngOnInit() {
this.core.register({ observerClass:this, eventName:"ThemeData" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.register({ observerClass:this, eventName:"ThemeChanged" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.emit({name:"ThemeDataRequest"});
this.controlUid = "chart_" + UUID.UUID();
}
ngAfterViewInit() {
}
ngOnDestroy(){
this.core.unregister({observerClass:this});
}
}
| {
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
} | conditional_block |
lineChart.component.ts | import 'style-loader!./lineChart.scss';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import { CoreService, CoreEvent } from 'app/core/services/core.service';
import { ViewComponent } from 'app/core/components/view/view.component';
import {Component, Input, OnInit, AfterViewInit, OnDestroy} from '@angular/core';
import * as ChartistLegend from 'chartist-plugin-legend';
import {UUID} from 'angular2-uuid';
import * as c3 from 'c3';
import { LineChartService, HandleDataFunc, LineChartData,LineChartMetadata, DataListItem } from './lineChart.service';
export interface ChartFormatter {
format (value, ratio, id);
}
export interface Analytics {
label:string;
min?:number;
max?:number;
avg?:number;
last?:number;
total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
  /** The first element is the name of the field (a string),
   * followed by the remaining elements, which are numbers.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public | (){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
            text: this.linechartData.meta.labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
            raw.value = raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end){
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
}
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
let avg = total !== "N/A" ? Number((total / col.length).toFixed(2)) : total;
//console.log("Total type: " + typeof col.length)
let myResult:Analytics = {
label:label,
min: total !== "N/A" ? this.getMin(col) : total ,//.toFixed(2),
max: total !== "N/A" ? this.getMax(col) : total,//.toFixed(2),
avg: avg,
last: total !== "N/A" ? Number(col[col.length - 1].toFixed(2)) : total,
total: total !== "N/A" ? Number(total.toFixed(2)) : total
}
allColumns.push(myResult);
}
this.legendAnalytics.next(allColumns);
}
getMin(arr:any[]){
return Math.min(...arr);
}
getMax(arr:any[]){
return Math.max(...arr);
}
getAvg(arr:any[]){
return 1;
}
getLast(arr:any[]){
return 1
}
// LifeCycle Hooks
ngOnInit() {
this.core.register({ observerClass:this, eventName:"ThemeData" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.register({ observerClass:this, eventName:"ThemeChanged" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.emit({name:"ThemeDataRequest"});
this.controlUid = "chart_" + UUID.UUID();
}
ngAfterViewInit() {
}
ngOnDestroy(){
this.core.unregister({observerClass:this});
}
}
| makeConfig | identifier_name |
lineChart.component.ts | import 'style-loader!./lineChart.scss';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import { CoreService, CoreEvent } from 'app/core/services/core.service';
import { ViewComponent } from 'app/core/components/view/view.component';
import {Component, Input, OnInit, AfterViewInit, OnDestroy} from '@angular/core';
import * as ChartistLegend from 'chartist-plugin-legend';
import {UUID} from 'angular2-uuid';
import * as c3 from 'c3';
import { LineChartService, HandleDataFunc, LineChartData,LineChartMetadata, DataListItem } from './lineChart.service';
export interface ChartFormatter {
format (value, ratio, id);
}
export interface Analytics {
label:string;
min?:number;
max?:number;
avg?:number;
last?:number;
total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
  /** The first element is the name of the field (a string),
   * followed by the remaining elements, which are numbers.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public makeConfig(){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
            text: this.linechartData.meta.labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
            raw.value = raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end){
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
}
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
let avg = total !== "N/A" ? Number((total / col.length).toFixed(2)) : total;
//console.log("Total type: " + typeof col.length)
let myResult:Analytics = {
label:label,
min: total !== "N/A" ? this.getMin(col) : total ,//.toFixed(2),
max: total !== "N/A" ? this.getMax(col) : total,//.toFixed(2),
avg: avg,
last: total !== "N/A" ? Number(col[col.length - 1].toFixed(2)) : total,
total: total !== "N/A" ? Number(total.toFixed(2)) : total
}
allColumns.push(myResult);
}
this.legendAnalytics.next(allColumns);
}
getMin(arr:any[]){
return Math.min(...arr);
}
getMax(arr:any[]){
return Math.max(...arr);
}
getAvg(arr:any[]){
return 1;
}
getLast(arr:any[]) |
// LifeCycle Hooks
ngOnInit() {
this.core.register({ observerClass:this, eventName:"ThemeData" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.register({ observerClass:this, eventName:"ThemeChanged" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.emit({name:"ThemeDataRequest"});
this.controlUid = "chart_" + UUID.UUID();
}
ngAfterViewInit() {
}
ngOnDestroy(){
this.core.unregister({observerClass:this});
}
}
| {
return 1
} | identifier_body |
lineChart.component.ts | import 'style-loader!./lineChart.scss';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import { CoreService, CoreEvent } from 'app/core/services/core.service';
import { ViewComponent } from 'app/core/components/view/view.component';
import {Component, Input, OnInit, AfterViewInit, OnDestroy} from '@angular/core';
import * as ChartistLegend from 'chartist-plugin-legend';
import {UUID} from 'angular2-uuid';
import * as c3 from 'c3';
import { LineChartService, HandleDataFunc, LineChartData,LineChartMetadata, DataListItem } from './lineChart.service';
export interface ChartFormatter {
format (value, ratio, id);
}
export interface Analytics {
label:string;
min?:number;
max?:number;
avg?:number;
last?:number;
total?:number;
}
@Component({
selector: 'line-chart',
template: `<div id="{{controlUid}}"></div>`
})
export class LineChartComponent extends ViewComponent implements OnInit, AfterViewInit, OnDestroy, HandleDataFunc {
@Input() dataList: DataListItem[];
  /** The first element is the name of the field (a string),
   * followed by the remaining elements, which are numbers.
* This fits in with how C3 charts work.
*
* [ ["nameOfField_1", number, number, number, number],
* ["nameOfField_2", number, number, number, number]
* ]
*/
@Input() series: any[][];
@Input() legends: string[];
@Input() type: string;
@Input() divideBy: number;
@Input() chartFormatter: ChartFormatter;
@Input() minY?: number = 0;
@Input() maxY?: number = 100;
@Input() labelY?: string = 'Label Y';
@Input() interactive: boolean;
public chart:any;
public conf:any;
public columns:any;
public linechartData:any;
public units: string = '';
public showLegendValues: boolean = false;
public legendEvents: BehaviorSubject<any>;
public legendLabels: BehaviorSubject<any>;
public legendAnalytics: BehaviorSubject<any>;
data: LineChartData = {
labels: [],
series: [],
//meta: {}
};
colorPattern = ["#2196f3", "#009688", "#ffc107", "#9c27b0", "#607d8b", "#00bcd4", "#8bc34a", "#ffeb3b", "#e91e63", "#3f51b5"];
timeFormat: string = "%H:%M";
culling:number = 6;
controlUid: string;
constructor(private core:CoreService, private _lineChartService: LineChartService) {
super();
this.legendEvents = new BehaviorSubject(false);
this.legendLabels = new BehaviorSubject([]);
this.legendAnalytics = new BehaviorSubject([]);
}
handleDataFunc(linechartData: LineChartData) {
//console.log(linechartData);
this.data.labels.splice(0, this.data.labels.length);
this.data.series.splice(0, this.data.series.length);
if(linechartData.meta){
this.units = linechartData.meta.units;
}
linechartData.labels.forEach((label) => {this.data.labels.push(new Date(label))});
linechartData.series.forEach((dataSeriesArray) => {
const newArray = [];
if(!linechartData.meta)console.log(linechartData);
if (typeof (this.divideBy) !== 'undefined' || linechartData.meta.conversion) {
dataSeriesArray.forEach((numberVal) => {
if(linechartData.meta.conversion){
newArray.push(this.convertTo(numberVal, linechartData.meta.conversion));
} else if (numberVal > 0) {
newArray.push((numberVal / this.divideBy).toFixed(2));
} else {
newArray.push(numberVal);
}
});
dataSeriesArray = newArray;
} else {
dataSeriesArray.forEach((numberVal) => {
if(numberVal > 0){
newArray.push(numberVal.toFixed(2));
} else {
newArray.push(numberVal);
} | dataSeriesArray = newArray;
}
this.data.series.push(dataSeriesArray);
});
const columns: any[][] = [];
let legendLabels: string[] = [];
// xColumn
const xValues: any[] = [];
xValues.push('xValues');
this.data.labels.forEach((label) => {
xValues.push(label);
});
columns.push(xValues);
// For C3.. Put the name of the series as the first element of each series array
for (let i = 0; i < this.legends.length && this.data.series.length; ++i) {
let legend: string;
if(linechartData.meta.removePrefix){
legend = this.legends[i].replace(linechartData.meta.removePrefix, "");
} else {
legend = this.legends[i];
}
legendLabels.push(legend);
let series: any[] = this.data.series[i];
if( typeof(series) !== 'undefined' && series.length > 0 ) {
series.unshift(legend);
} else {
series = [legend];
}
columns.push(series);
}
this.columns = columns;
this.linechartData = linechartData;
this.legendLabels.next(legendLabels);
this.analyze(columns);
this.render();
}
public render(conf?:any){
if(!conf){
conf = this.makeConfig();
}
let colors = this.colorsFromTheme();
const color = {
pattern: colors
}
conf.color = color;
this.chart = c3.generate(conf);
}
//this.chart = c3.generate({
public makeConfig(){
let conf = {
interaction: {
enabled:this.interactive
},
bindto: '#' + this.controlUid,
/*color: {
pattern: this.colorPattern
},*/
data: {
columns: this.columns,
//colors: this.createColorObject(),
x: 'xValues',
//xFormat: '%H:%M',
type: 'line',
onmouseout: (d) => {
this.showLegendValues = false;
}
},
axis: {
x: {
type: 'timeseries',
tick: {
//format: '%H:%M:%S',
format: this.timeFormat,
fit: true,
//values: ['01:10', '03:10', '06:10']
culling: {
max: this.culling
}
}
},
y:{
tick: {
format: (y) => { return y + this.linechartData.meta.units}
},
label: {
            text: this.linechartData.meta.labelY,
position: 'outer-middle',
}
//default: [this.minY,this.maxY],
/*min: this.minY,
max: this.maxY*/
}
},
grid:{
x:{
show: true
},
y:{
show: true
}
},
subchart: {
show: false
},
legend: {
show:false
},
zoom: {
enabled: false
},
tooltip: {
show: true,
contents: (raw, defaultTitleFormat, defaultValueFormat, color) => {
if(!this.showLegendValues){
this.showLegendValues = true;
}
if(raw.value == Number(0)){
            raw.value = raw.value.toString()
}
this.legendEvents.next(raw);
return '<div style="display:none">' + raw[0].x + '</div>';
}
}
}
return conf;
}
/*private setupPiechart() {
const chart = c3.generate({
bindto: '#' + this.controlUid,
data: {
columns: this.series,
type: 'pie'
},
pie: {
label: {
format: this.chartFormatter.format
}
}
});
}*/
private processThemeColors(theme):string[]{
let colors: string[] = [];
theme.accentColors.map((color) => {
colors.push(theme[color]);
});
return colors;
}
private createColorObject(){
let obj = {};
this.legends.forEach((item, index)=>{
obj[item] = this.colorPattern[index]
})
return obj;
}
public fetchData(rrdOptions, timeformat?: string, culling?:number){
if(timeformat){
this.timeFormat = timeformat;
}
if(culling){
this.culling = culling;
}
// Convert from milliseconds to seconds for epoch time
rrdOptions.start = Math.floor(rrdOptions.start / 1000);
if(rrdOptions.end){
rrdOptions.end = Math.floor(rrdOptions.end / 1000);
}
// This is the time portion of the API call.
this._lineChartService.getData(this, this.dataList, rrdOptions);
}
public convertTo(value, conversion){
let result;
switch(conversion){
case 'bytesToGigabytes':
result = value / 1073741824;
break;
case 'percentFloatToInteger':
result = value * 100;
break;
}
return result.toFixed(2);
}
// Analytics
analyze(columns){
let allColumns: Analytics[] = [];
let cols = Object.assign([], columns);
// Remove X axis
cols.shift(columns[0]);
for(let i = 0; i < cols.length; i++){
// Middleware provides data as strings
// so we store the label (first item)
// and convert the rest to numbers
let colStrings = cols[i];
let label = colStrings[0];
let col = colStrings.map(x => Number(x));
col.shift(col[0]);
let total = col.length > 0 ? col.reduce((accumulator, currentValue) => Number(accumulator) + Number(currentValue)) : "N/A";
let avg = total !== "N/A" ? Number((total / col.length).toFixed(2)) : total;
//console.log("Total type: " + typeof col.length)
let myResult:Analytics = {
label:label,
min: total !== "N/A" ? this.getMin(col) : total ,//.toFixed(2),
max: total !== "N/A" ? this.getMax(col) : total,//.toFixed(2),
avg: avg,
last: total !== "N/A" ? Number(col[col.length - 1].toFixed(2)) : total,
total: total !== "N/A" ? Number(total.toFixed(2)) : total
}
allColumns.push(myResult);
}
this.legendAnalytics.next(allColumns);
}
getMin(arr:any[]){
return Math.min(...arr);
}
getMax(arr:any[]){
return Math.max(...arr);
}
getAvg(arr:any[]){
return 1;
}
getLast(arr:any[]){
return 1
}
// LifeCycle Hooks
ngOnInit() {
this.core.register({ observerClass:this, eventName:"ThemeData" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.register({ observerClass:this, eventName:"ThemeChanged" }).subscribe((evt:CoreEvent)=>{
this.colorPattern = this.processThemeColors(evt.data);
if(this.linechartData){
this.render();
//this.chart.data.colors(this.createColorObject())
}
});
this.core.emit({name:"ThemeDataRequest"});
this.controlUid = "chart_" + UUID.UUID();
}
ngAfterViewInit() {
}
ngOnDestroy(){
this.core.unregister({observerClass:this});
}
} | }); | random_line_split |
micro_updater.py | from github import Github, Repository, GitRelease, ContentFile
from loguru import logger
import configparser
import sys
import os
import wget
import hashlib
import json
from paho.mqtt.client import Client as MQTTClient, MQTTMessageInfo
from paho.mqtt.subscribe import simple as subscribe
from threading import Thread
from time import time, sleep
import socket
import ctypes
DEFAULT_COMPLETE_CONFIG_PATH = 'config.ini'
CONFIGURATION_LAYOUT = {'github': ['token', 'repo', 'release_cache_complete_path', 'check_rate'],
'logging': ['logs_path'],
'updates': ['download_path', 'port'],
'mqtt': ['broker', 'updates_topic', 'updates_acks_topic', 'installed_tags_topic', 'id'],
'trusted_files': ['files']}
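
# Every section and key listed above must be present in config.ini; read_configuration()
# writes a placeholder sample and exits if anything is missing.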
class RepoFile:
def __init__(self, name, sha, download_link):
self.name = name
self.sha = sha
self.download_link = download_link
self.path = None # if downloaded, contains file's complete path
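
# One DeviceUpdater thread is spawned per out-of-date probe: it publishes the release JSON on the
# retained '<ip>_updates' topic and then streams each file to the probe over a TCP socket.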
class DeviceUpdater(Thread):
def __init__(self, ip, port, files, broker, installed_tags_topic, mqtt_client, release_json):
super().__init__()
self.ip = ip
self.port = int(port)
self.files = files
self.broker = broker
self.installed_tags_topic = installed_tags_topic
self.mqtt_client = mqtt_client
self.topic = f'{self.ip}_updates'
self.release_json = release_json
def identity(self) -> str:
return f'Thread {self.ip}'
def run(self) -> None:
self.send_new_release(self.release_json)
logger.debug(f'[{self.identity()}]: sending {len(self.files)} files')
for i, file in enumerate(self.files):
logger.debug(f'[{self.identity()}]: [{i}/{len(self.files)-1}] sending {file.name}...')
if not self.send_file(file):
logger.error(f'[{self.identity()}]: file not sent, aborting update...')
return
sleep(1) # Wait a bit to not overcharge probe
logger.debug(f'[{self.identity()}]: ')
logger.debug(f'[{self.identity()}]: stopping me...')
def send_end_message(self):
logger.debug(f'[{self.identity()}]: Sending end "END" message')
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
        start = time()
        # time() is measured in seconds, so compare the elapsed time against the timeout directly
        while not message.is_published() and time() - start < timeout:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
    def run(self) -> None:
        try:
            if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
                self.response = False
                return  # without this return the failure flag was immediately overwritten below
            self.response = True
        except SystemExit:
            self.response = False
def _download_file(self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool: | with open(file.path, 'r') as file_opened:
content = file_opened.read()
size = os.stat(file.path)
hasher = hashlib.sha1(f'blob {len(content)}\x00{content}'.encode("utf-8"))
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
    def kill_thread(self):
        # threading.Thread has no get_id(); ident holds the thread id once the thread has started
        thread_id = self.ident
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
            tag, files = self.check_repo()
            if tag is not None:
                # Download compiled files only; filter on the file name, not the RepoFile object itself
                files = [file for file in files if ".mpy" in file.name]
                self._download_files(files)
                update_json = self._update_json(files=files, tag=tag)
                self.start_server(files, update_json)
            sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
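            # Expected payload: a JSON object with at least 'ip' and 'tag', e.g. {"ip": "192.168.1.10", "tag": "v1.0"} (illustrative values)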
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
            except Exception:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
            latest_tag = json.loads(update_json)['tag']  # update_json is a JSON string, so parse it before indexing
            if installed_tag_json['tag'] != latest_tag:
                logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {latest_tag}")
                self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
        start = time()
        # time() is measured in seconds, so compare the elapsed time against the timeout directly
        while not message.is_published() and time() - start < timeout:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnect from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION_LAYOUT:
assert section in sections
for key in CONFIGURATION_LAYOUT[section]:
assert key in self.config[section]
except AssertionError:
logger.critical(f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...')
for section in CONFIGURATION_LAYOUT:
self.config[section] = {}
for key in CONFIGURATION_LAYOUT[section]:
self.config[section][key] = f'<{key}>'
try:
if os.path.isfile(DEFAULT_COMPLETE_CONFIG_PATH):
logger.error("Can't create configuration sample, please provide a custom configuration file")
exit(1)
with open(DEFAULT_COMPLETE_CONFIG_PATH, 'w') as file:
self.config.write(file)
except Exception as e:
logger.critical(f"Can't create a config sample as '{DEFAULT_COMPLETE_CONFIG_PATH}' in working directory; {e}")
finally:
exit(1)
logger.info(f'Configuration loaded: \n'
f'\tToken: {self.config["github"]["token"]}\n'
f'\tLogs path: {self.config["logging"]["logs_path"]}')
def github_init(self):
logger.debug('Initializing github attributes...')
github = Github(self.config['github']['token'])
self.github_client = github.get_user()
self.repo_obj = self.github_client.get_repo(self.config['github']['repo'])
self.load_cached_release()
logger.debug('Github attributes initialized.')
def load_cached_release(self):
cache_path = self.config['github']['release_cache_complete_path']
logger.debug(f'Loading cached release from {cache_path}')
try:
with open(cache_path, 'r') as file:
self.cached_release = file.readline().strip()
logger.debug(f'Cached release: {self.cached_release}')
except Exception as e:
logger.error(f"Can't load cached release, 'default' tag will be used; {e}")
self.cached_release = 'default'
def save_cached_release(self, tag):
release_cache_path = self.config["github"]["release_cache_complete_path"]
logger.debug(f'Saving cached release in {release_cache_path}')
self.cached_release = tag
try:
with open(release_cache_path, 'w') as file:
file.write(self.cached_release)
logger.debug(f'Cached release saved.')
except Exception as e:
logger.error(f"Can't save cached release")
def check_repo(self): # returns: latest_tag, files
logger.debug(f'Checking "{self.config["github"]["repo"]}" latest release tag')
try:
latest_release = self.repo_obj.get_latest_release()
except:
logger.error(f"Can't get latest release")
return None, None
tag = latest_release.tag_name
if self.cached_release != tag:
logger.info(f"New update found: {tag}")
contents = self.repo_obj.get_contents(path='', ref=tag)
files = [RepoFile(file.name, file.sha, file.download_url) for file in contents]
self.save_cached_release(tag)
return tag, files
else:
return None, None
def _clean_download_folder(self):
download_path = self.config['updates']['download_path']
logger.debug(f'Cleaning download folder "{download_path}"...')
if not os.path.isdir(download_path):
logger.warning(f'Download folder "{download_path}" does not exists, creating it...')
os.mkdir(download_path)
logger.debug('Download folder ready.')
return
files = [os.path.join(download_path, file) for file in os.listdir(download_path)]
logger.debug(f'{len(files)} files will be deleted..')
for idx, file in enumerate(files):
logger.debug(f'[{idx}/{len(files)-1}] Deleting {file}')
if os.path.isfile(file):
os.remove(file)
else:
os.rmdir(file)
if len(os.listdir(download_path)) > 0:
logger.error("Can't clean download folder")
exit(1)
logger.debug('Download folder ready.')
def _download_files(self, files) -> bool:
download_path = self.config['updates']['download_path']
logger.debug(f'Downloading {len(files)} files in {download_path}...')
self._clean_download_folder()
trusted_files = self.config['trusted_files']['files'].split(',')
trusted_files = [file.strip() for file in trusted_files]
files_threads = [FileDownloader(file, download_path, file.name in trusted_files) for file in files]
for thread in files_threads:
thread.start()
for thread in files_threads:
thread.join()
if not thread.response:
logger.error(f"Error downloading {thread.file.name}, aborting files download")
for th in files_threads:
if th.is_alive():
th.kill_thread()
return False
logger.debug('Files downloaded')
return True
def remove_empty_lines(self, file: RepoFile) -> (str, bool):
with open(file.path, 'r') as file_opened:
content = file_opened.read()
#print(content)
content = content.rstrip()
with open(file.path, 'w') as file_opened:
file_opened.write(content)
def _update_json(self, tag, files):
msg = {}
msg['tag'] = tag
msg['files'] = [file.name for file in files]
msg_json = json.dumps(msg)
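        # Example output (illustrative): '{"tag": "v1.2", "files": ["main.mpy"]}', published on each probe's '<ip>_updates' topic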
return msg_json
updater = MicroUpdater()
updater.loop() | logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try: | random_line_split |
micro_updater.py | from github import Github, Repository, GitRelease, ContentFile
from loguru import logger
import configparser
import sys
import os
import wget
import hashlib
import json
from paho.mqtt.client import Client as MQTTClient, MQTTMessageInfo
from paho.mqtt.subscribe import simple as subscribe
from threading import Thread
from time import time, sleep
import socket
import ctypes
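
# Overall flow: MicroUpdater polls the GitHub repo for a new release tag, downloads the release's
# .mpy files, and listens on MQTT for "installed tag" reports from probes; every probe running an
# older tag receives the new release JSON on its '<ip>_updates' topic and the files over TCP.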
DEFAULT_COMPLETE_CONFIG_PATH = 'config.ini'
CONFIGURATION_LAYOUT = {'github': ['token', 'repo', 'release_cache_complete_path', 'check_rate'],
'logging': ['logs_path'],
'updates': ['download_path', 'port'],
'mqtt': ['broker', 'updates_topic', 'updates_acks_topic', 'installed_tags_topic', 'id'],
'trusted_files': ['files']}
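
# Example config.ini matching CONFIGURATION_LAYOUT (all values below are illustrative):
#
#   [github]
#   token = <personal access token>
#   repo = <repository name>
#   release_cache_complete_path = cached_release.txt
#   check_rate = 60
#
#   [logging]
#   logs_path = logs
#
#   [updates]
#   download_path = downloads
#   port = 9999
#
#   [mqtt]
#   broker = <broker host>
#   updates_topic = updates
#   updates_acks_topic = updates_acks
#   installed_tags_topic = installed_tags
#   id = micro_updater
#
#   [trusted_files]
#   files = boot.py, main.py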
class RepoFile:
def __init__(self, name, sha, download_link):
self.name = name
self.sha = sha
self.download_link = download_link
self.path = None # if downloaded, contains file's complete path
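
# DeviceUpdater: one worker thread per probe being updated. It publishes the release JSON on the
# retained '<ip>_updates' topic, then pushes every downloaded file to the probe over a TCP
# connection on the configured update port.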
class DeviceUpdater(Thread):
def __init__(self, ip, port, files, broker, installed_tags_topic, mqtt_client, release_json):
super().__init__()
self.ip = ip
self.port = int(port)
self.files = files
self.broker = broker
self.installed_tags_topic = installed_tags_topic
self.mqtt_client = mqtt_client
self.topic = f'{self.ip}_updates'
self.release_json = release_json
def identity(self) -> str:
return f'Thread {self.ip}'
def run(self) -> None:
self.send_new_release(self.release_json)
logger.debug(f'[{self.identity()}]: sending {len(self.files)} files')
for i, file in enumerate(self.files):
logger.debug(f'[{self.identity()}]: [{i}/{len(self.files)-1}] sending {file.name}...')
if not self.send_file(file):
logger.error(f'[{self.identity()}]: file not sent, aborting update...')
return
sleep(1) # Wait a bit to not overcharge probe
        logger.debug(f'[{self.identity()}]: all files sent')
logger.debug(f'[{self.identity()}]: stopping me...')
def send_end_message(self):
logger.debug(f'[{self.identity()}]: Sending end "END" message')
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
        start = time()
        # time() is measured in seconds, so compare the elapsed time against the timeout directly
        while not message.is_published() and time() - start < timeout:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
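
# FileDownloader: downloads a single RepoFile into the download folder and verifies it against the
# git blob SHA reported by the GitHub API; files listed under [trusted_files] skip the hash check.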
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
    def run(self) -> None:
        try:
            if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
                self.response = False
                return  # without this return the failure flag was immediately overwritten below
            self.response = True
        except SystemExit:
            self.response = False
def _download_file(self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool:
logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
        try:
            # GitHub reports the git blob SHA: sha1(b"blob <size in bytes>\0" + raw file content)
            with open(file.path, 'rb') as file_opened:
                content = file_opened.read()
            hasher = hashlib.sha1(b'blob ' + str(len(content)).encode("utf-8") + b'\x00' + content)
            hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
    def kill_thread(self):
        # threading.Thread has no get_id(); ident holds the thread id once the thread has started
        thread_id = self.ident
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
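
# MicroUpdater: the orchestrator. It polls GitHub every 'check_rate' seconds, downloads new releases,
# runs a server thread that listens for installed-tag reports on MQTT, and spawns a DeviceUpdater for
# every probe that reports an older tag.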
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
            tag, files = self.check_repo()
            if tag is not None:
                # Download compiled files only; filter on the file name, not the RepoFile object itself
                files = [file for file in files if ".mpy" in file.name]
                self._download_files(files)
                update_json = self._update_json(files=files, tag=tag)
                self.start_server(files, update_json)
            sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
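            # Expected payload shape: {"ip": "<probe ip>", "tag": "<installed release tag>"}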
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
            except Exception:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
            latest_tag = json.loads(update_json)['tag']  # update_json is a JSON string, so parse it before indexing
            if installed_tag_json['tag'] != latest_tag:
                logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {latest_tag}")
                self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
        start = time()
        # time() is measured in seconds, so compare the elapsed time against the timeout directly
        while not message.is_published() and time() - start < timeout:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnected from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION_LAYOUT:
assert section in sections
for key in CONFIGURATION_LAYOUT[section]:
assert key in self.config[section]
except AssertionError:
logger.critical(f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...')
for section in CONFIGURATION_LAYOUT:
|
try:
if os.path.isfile(DEFAULT_COMPLETE_CONFIG_PATH):
logger.error("Can't create configuration sample, please provide a custom configuration file")
exit(1)
with open(DEFAULT_COMPLETE_CONFIG_PATH, 'w') as file:
self.config.write(file)
except Exception as e:
logger.critical(f"Can't create a config sample as '{DEFAULT_COMPLETE_CONFIG_PATH}' in working directory; {e}")
finally:
exit(1)
logger.info(f'Configuration loaded: \n'
f'\tToken: {self.config["github"]["token"]}\n'
f'\tLogs path: {self.config["logging"]["logs_path"]}')
def github_init(self):
logger.debug('Initializing github attributes...')
github = Github(self.config['github']['token'])
self.github_client = github.get_user()
self.repo_obj = self.github_client.get_repo(self.config['github']['repo'])
self.load_cached_release()
logger.debug('Github attributes initialized.')
def load_cached_release(self):
cache_path = self.config['github']['release_cache_complete_path']
logger.debug(f'Loading cached release from {cache_path}')
try:
with open(cache_path, 'r') as file:
self.cached_release = file.readline().strip()
logger.debug(f'Cached release: {self.cached_release}')
except Exception as e:
logger.error(f"Can't load cached release, 'default' tag will be used; {e}")
self.cached_release = 'default'
def save_cached_release(self, tag):
release_cache_path = self.config["github"]["release_cache_complete_path"]
logger.debug(f'Saving cached release in {release_cache_path}')
self.cached_release = tag
try:
with open(release_cache_path, 'w') as file:
file.write(self.cached_release)
logger.debug(f'Cached release saved.')
except Exception as e:
logger.error(f"Can't save cached release")
def check_repo(self): # returns: latest_tag, files
logger.debug(f'Checking "{self.config["github"]["repo"]}" latest release tag')
try:
latest_release = self.repo_obj.get_latest_release()
except:
logger.error(f"Can't get latest release")
return None, None
tag = latest_release.tag_name
if self.cached_release != tag:
logger.info(f"New update found: {tag}")
contents = self.repo_obj.get_contents(path='', ref=tag)
files = [RepoFile(file.name, file.sha, file.download_url) for file in contents]
self.save_cached_release(tag)
return tag, files
else:
return None, None
def _clean_download_folder(self):
download_path = self.config['updates']['download_path']
logger.debug(f'Cleaning download folder "{download_path}"...')
if not os.path.isdir(download_path):
logger.warning(f'Download folder "{download_path}" does not exist, creating it...')
os.mkdir(download_path)
logger.debug('Download folder ready.')
return
files = [os.path.join(download_path, file) for file in os.listdir(download_path)]
logger.debug(f'{len(files)} files will be deleted..')
for idx, file in enumerate(files):
logger.debug(f'[{idx}/{len(files)-1}] Deleting {file}')
if os.path.isfile(file):
os.remove(file)
else:
os.rmdir(file)
if len(os.listdir(download_path)) > 0:
logger.error("Can't clean download folder")
exit(1)
logger.debug('Download folder ready.')
def _download_files(self, files) -> bool:
download_path = self.config['updates']['download_path']
logger.debug(f'Downloading {len(files)} files in {download_path}...')
self._clean_download_folder()
trusted_files = self.config['trusted_files']['files'].split(',')
trusted_files = [file.strip() for file in trusted_files]
files_threads = [FileDownloader(file, download_path, file.name in trusted_files) for file in files]
for thread in files_threads:
thread.start()
for thread in files_threads:
thread.join()
if not thread.response:
logger.error(f"Error downloading {thread.file.name}, aborting files download")
for th in files_threads:
if th.is_alive():
th.kill_thread()
return False
logger.debug('Files downloaded')
return True
def remove_empty_lines(self, file: RepoFile) -> (str, bool):
with open(file.path, 'r') as file_opened:
content = file_opened.read()
#print(content)
content = content.rstrip()
with open(file.path, 'w') as file_opened:
file_opened.write(content)
def _update_json(self, tag, files):
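# Returns a JSON string with the release tag and the list of file names; this is the payload that DeviceUpdater publishes to each probe.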
msg = {}
msg['tag'] = tag
msg['files'] = [file.name for file in files]
msg_json = json.dumps(msg)
return msg_json
updater = MicroUpdater()
updater.loop()
| self.config[section] = {}
for key in CONFIGURATION_LAYOUT[section]:
self.config[section][key] = f'<{key}>' | conditional_block |
micro_updater.py | from github import Github, Repository, GitRelease, ContentFile
from loguru import logger
import configparser
import sys
import os
import wget
import hashlib
import json
from paho.mqtt.client import Client as MQTTClient, MQTTMessageInfo
from paho.mqtt.subscribe import simple as subscribe
from threading import Thread
from time import time, sleep
import socket
import ctypes
DEFAULT_COMPLETE_CONFIG_PATH = 'config.ini'
CONFIGURATION_LAYOUT = {'github': ['token', 'repo', 'release_cache_complete_path', 'check_rate'],
'logging': ['logs_path'],
'updates': ['download_path', 'port'],
'mqtt': ['broker', 'updates_topic', 'updates_acks_topic', 'installed_tags_topic', 'id'],
'trusted_files': ['files']}
class RepoFile:
def __init__(self, name, sha, download_link):
self.name = name
self.sha = sha
self.download_link = download_link
self.path = None # if downloaded, contains file's complete path
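# DeviceUpdater: per-probe worker thread. It publishes the release JSON on the probe's '<ip>_updates' topic,
# then streams each downloaded file to the probe over a raw TCP socket, pausing briefly between files.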
class DeviceUpdater(Thread):
def __init__(self, ip, port, files, broker, installed_tags_topic, mqtt_client, release_json):
super().__init__()
self.ip = ip
self.port = int(port)
self.files = files
self.broker = broker
self.installed_tags_topic = installed_tags_topic
self.mqtt_client = mqtt_client
self.topic = f'{self.ip}_updates'
self.release_json = release_json
def identity(self) -> str:
return f'Thread {self.ip}'
def run(self) -> None:
self.send_new_release(self.release_json)
logger.debug(f'[{self.identity()}]: sending {len(self.files)} files')
for i, file in enumerate(self.files):
logger.debug(f'[{self.identity()}]: [{i}/{len(self.files)-1}] sending {file.name}...')
if not self.send_file(file):
logger.error(f'[{self.identity()}]: file not sent, aborting update...')
return
sleep(1) # Wait a bit to not overcharge probe
logger.debug(f'[{self.identity()}]: ')
logger.debug(f'[{self.identity()}]: stopping me...')
def send_end_message(self):
logger.debug(f'[{self.identity()}]: Sending end "END" message')
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout  # time() returns seconds, so the timeout needs no millisecond conversion
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
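# FileDownloader: downloads a single repository file with wget and, unless the file is listed as trusted,
# verifies its git blob SHA-1 against the value reported by the GitHub API; the result is stored in self.response.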
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
def run(self) -> None:
try:
if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
self.response = False
return  # a failed download/verification must not be overwritten by the success flag below
self.response = True
except SystemExit:
self.response = False
def _download_file(self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool:
logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try:
with open(file.path, 'rb') as file_opened:
content = file_opened.read()
# Hash the raw bytes the same way git builds a blob SHA-1 (header uses the byte length, not the character count)
hasher = hashlib.sha1(b'blob ' + str(len(content)).encode('ascii') + b'\x00' + content)
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
def kill_thread(self):
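# Best-effort cancellation: raises SystemExit inside the downloader thread via the CPython C-API.
# The exception is only delivered when the thread next executes Python bytecode, so blocking C-level calls (e.g. a stalled download) are not interrupted.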
thread_id = self.ident  # native Thread identifier, available once the thread has started
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
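# One polling cycle: wait until no per-device update threads are alive, check GitHub for a new release tag,
# and when a new tag is found download its files and (re)start the server thread, then sleep for check_rate seconds.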
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
tag, files = self.check_repo()
if tag is not None:
files = [file for file in files if ".mpy" in file.name]  # Download compiled files only
self._download_files(files)
update_json = self._update_json(files=files, tag=tag)
self.start_server(files, update_json)
sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
except:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
latest_tag = json.loads(update_json)['tag']  # update_json is the JSON string built by _update_json
if installed_tag_json['tag'] != latest_tag:
logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {latest_tag}")
self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout  # time() returns seconds, so the timeout needs no millisecond conversion
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnected from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
|
def github_init(self):
logger.debug('Initializing github attributes...')
github = Github(self.config['github']['token'])
self.github_client = github.get_user()
self.repo_obj = self.github_client.get_repo(self.config['github']['repo'])
self.load_cached_release()
logger.debug('Github attributes initialized.')
def load_cached_release(self):
cache_path = self.config['github']['release_cache_complete_path']
logger.debug(f'Loading cached release from {cache_path}')
try:
with open(cache_path, 'r') as file:
self.cached_release = file.readline().strip()
logger.debug(f'Cached release: {self.cached_release}')
except Exception as e:
logger.error(f"Can't load cached release, 'default' tag will be used; {e}")
self.cached_release = 'default'
def save_cached_release(self, tag):
release_cache_path = self.config["github"]["release_cache_complete_path"]
logger.debug(f'Saving cached release in {release_cache_path}')
self.cached_release = tag
try:
with open(release_cache_path, 'w') as file:
file.write(self.cached_release)
logger.debug(f'Cached release saved.')
except Exception as e:
logger.error(f"Can't save cached release")
def check_repo(self): # returns: latest_tag, files
logger.debug(f'Checking "{self.config["github"]["repo"]}" latest release tag')
try:
latest_release = self.repo_obj.get_latest_release()
except:
logger.error(f"Can't get latest release")
return None, None
tag = latest_release.tag_name
if self.cached_release != tag:
logger.info(f"New update found: {tag}")
contents = self.repo_obj.get_contents(path='', ref=tag)
files = [RepoFile(file.name, file.sha, file.download_url) for file in contents]
self.save_cached_release(tag)
return tag, files
else:
return None, None
def _clean_download_folder(self):
download_path = self.config['updates']['download_path']
logger.debug(f'Cleaning download folder "{download_path}"...')
if not os.path.isdir(download_path):
logger.warning(f'Download folder "{download_path}" does not exist, creating it...')
os.mkdir(download_path)
logger.debug('Download folder ready.')
return
files = [os.path.join(download_path, file) for file in os.listdir(download_path)]
logger.debug(f'{len(files)} files will be deleted..')
for idx, file in enumerate(files):
logger.debug(f'[{idx}/{len(files)-1}] Deleting {file}')
if os.path.isfile(file):
os.remove(file)
else:
os.rmdir(file)
if len(os.listdir(download_path)) > 0:
logger.error("Can't clean download folder")
exit(1)
logger.debug('Download folder ready.')
def _download_files(self, files) -> bool:
download_path = self.config['updates']['download_path']
logger.debug(f'Downloading {len(files)} files in {download_path}...')
self._clean_download_folder()
trusted_files = self.config['trusted_files']['files'].split(',')
trusted_files = [file.strip() for file in trusted_files]
files_threads = [FileDownloader(file, download_path, file.name in trusted_files) for file in files]
for thread in files_threads:
thread.start()
for thread in files_threads:
thread.join()
if not thread.response:
logger.error(f"Error downloading {thread.file.name}, aborting files download")
for th in files_threads:
if th.is_alive():
th.kill_thread()
return False
logger.debug('Files downloaded')
return True
def remove_empty_lines(self, file: RepoFile) -> (str, bool):
with open(file.path, 'r') as file_opened:
content = file_opened.read()
#print(content)
content = content.rstrip()
with open(file.path, 'w') as file_opened:
file_opened.write(content)
def _update_json(self, tag, files):
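# Returns a JSON string with the release tag and the list of file names; this is the payload that DeviceUpdater publishes to each probe.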
msg = {}
msg['tag'] = tag
msg['files'] = [file.name for file in files]
msg_json = json.dumps(msg)
return msg_json
updater = MicroUpdater()
updater.loop()
| logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION_LAYOUT:
assert section in sections
for key in CONFIGURATION_LAYOUT[section]:
assert key in self.config[section]
except AssertionError:
logger.critical(f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...')
for section in CONFIGURATION_LAYOUT:
self.config[section] = {}
for key in CONFIGURATION_LAYOUT[section]:
self.config[section][key] = f'<{key}>'
try:
if os.path.isfile(DEFAULT_COMPLETE_CONFIG_PATH):
logger.error("Can't create configuration sample, please provide a custom configuration file")
exit(1)
with open(DEFAULT_COMPLETE_CONFIG_PATH, 'w') as file:
self.config.write(file)
except Exception as e:
logger.critical(f"Can't create a config sample as '{DEFAULT_COMPLETE_CONFIG_PATH}' in working directory; {e}")
finally:
exit(1)
logger.info(f'Configuration loaded: \n'
f'\tToken: {self.config["github"]["token"]}\n'
f'\tLogs path: {self.config["logging"]["logs_path"]}') | identifier_body |
micro_updater.py | from github import Github, Repository, GitRelease, ContentFile
from loguru import logger
import configparser
import sys
import os
import wget
import hashlib
import json
from paho.mqtt.client import Client as MQTTClient, MQTTMessageInfo
from paho.mqtt.subscribe import simple as subscribe
from threading import Thread
from time import time, sleep
import socket
import ctypes
DEFAULT_COMPLETE_CONFIG_PATH = 'config.ini'
CONFIGURATION_LAYOUT = {'github': ['token', 'repo', 'release_cache_complete_path', 'check_rate'],
'logging': ['logs_path'],
'updates': ['download_path', 'port'],
'mqtt': ['broker', 'updates_topic', 'updates_acks_topic', 'installed_tags_topic', 'id'],
'trusted_files': ['files']}
class RepoFile:
def __init__(self, name, sha, download_link):
self.name = name
self.sha = sha
self.download_link = download_link
self.path = None # if downloaded, contains file's complete path
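# DeviceUpdater: per-probe worker thread. It publishes the release JSON on the probe's '<ip>_updates' topic,
# then streams each downloaded file to the probe over a raw TCP socket, pausing briefly between files.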
class DeviceUpdater(Thread):
def __init__(self, ip, port, files, broker, installed_tags_topic, mqtt_client, release_json):
super().__init__()
self.ip = ip
self.port = int(port)
self.files = files
self.broker = broker
self.installed_tags_topic = installed_tags_topic
self.mqtt_client = mqtt_client
self.topic = f'{self.ip}_updates'
self.release_json = release_json
def identity(self) -> str:
return f'Thread {self.ip}'
def run(self) -> None:
self.send_new_release(self.release_json)
logger.debug(f'[{self.identity()}]: sending {len(self.files)} files')
for i, file in enumerate(self.files):
logger.debug(f'[{self.identity()}]: [{i}/{len(self.files)-1}] sending {file.name}...')
if not self.send_file(file):
logger.error(f'[{self.identity()}]: file not sent, aborting update...')
return
sleep(1) # Wait a bit to not overcharge probe
logger.debug(f'[{self.identity()}]: ')
logger.debug(f'[{self.identity()}]: stopping me...')
def send_end_message(self):
logger.debug(f'[{self.identity()}]: Sending end "END" message')
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload='END', retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug(f'[{self.identity()}]: Message sent')
def send_new_release(self, release_json):
logger.debug(f"[{self.identity()}]: Sending {release_json} on {self.topic}...")
message = self.mqtt_client.publish(self.topic, payload='', retain=True)
self.mqtt_wait_publish(message)
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
self.mqtt_client.reconnect()
message = self.mqtt_client.publish(self.topic, payload=release_json, retain=True)
self.mqtt_wait_publish(message)
if not message.is_published():
logger.error(f'[{self.identity()}]: Message not sent')
return
logger.debug('Message sent')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout  # time() returns seconds, so the timeout needs no millisecond conversion
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def send_file(self, file: RepoFile) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((self.ip, self.port))
with open(file.path, 'rb') as f:
content = f.read()
if s.sendall(content) is None:
logger.debug(f'[{self.identity()}]: {file.name} sent.')
return True
else:
logger.error(f'[{self.identity()}]: {file.name} not sent.')
return False
except socket.timeout as stout:
logger.error(f"[{self.identity()}]: Timeout connecting to remote socket; {stout}")
return False
except Exception as e:
logger.error(f"[{self.identity()}]: Error reading file '{file.path}'; {e}")
return False
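# FileDownloader: downloads a single repository file with wget and, unless the file is listed as trusted,
# verifies its git blob SHA-1 against the value reported by the GitHub API; the result is stored in self.response.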
class FileDownloader(Thread):
def __init__(self, file: RepoFile, download_path: str, trusted: bool):
super().__init__()
self.file = file
self.download_path = download_path
self.response = None
self.trusted = trusted
def run(self) -> None:
try:
if not self._download_file(self.file, self.download_path) or not self._verify_file(self.file):
self.response = False
return  # a failed download/verification must not be overwritten by the success flag below
self.response = True
except SystemExit:
self.response = False
def | (self, file: RepoFile, download_path) -> bool:
logger.debug(f'Downloading {file.name} into {download_path}...')
try:
file_path = os.path.join(download_path, file.name)
wget.download(file.download_link, out=file_path)
file.path = file_path
return True
except Exception as e:
logger.error(f"Can't download {file.name} from {file.download_link}")
return False
def _verify_file(self, file: RepoFile) -> bool:
logger.debug(f'Verifying {file.name} integrity...')
if self.trusted:
logger.warning(f'Skipping {file.name} integrity check')
return True
try:
with open(file.path, 'rb') as file_opened:
content = file_opened.read()
# Hash the raw bytes the same way git builds a blob SHA-1 (header uses the byte length, not the character count)
hasher = hashlib.sha1(b'blob ' + str(len(content)).encode('ascii') + b'\x00' + content)
hash = hasher.hexdigest()
if hash == file.sha:
logger.debug('File integrity OK')
return True
else:
logger.error(f'File hash mismatch\n'
f'Calculated:\t{hash}\n'
f'Correct:\t{file.sha}')
return False
except Exception as e:
logger.error(f"Can't verify {file.name} integrity; {e}")
return False
def kill_thread(self):
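# Best-effort cancellation: raises SystemExit inside the downloader thread via the CPython C-API.
# The exception is only delivered when the thread next executes Python bytecode, so blocking C-level calls (e.g. a stalled download) are not interrupted.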
thread_id = self.ident  # native Thread identifier, available once the thread has started
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
class MicroUpdater:
def __init__(self, config_path=DEFAULT_COMPLETE_CONFIG_PATH):
log_format = '<green>{time: YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{level}: {message}</level>'
logger.remove()
logger.add(sys.stdout, format=log_format, colorize=True)
logger.info('Starting MicroUpdater...')
self.config = configparser.ConfigParser()
self.read_configuration(config_path)
logger.add(os.path.join(self.config['logging']['logs_path'], 'log_{time: YYYY-MM-DD}.log'), format=log_format, colorize=True, compression='zip', rotation='00:00')
self.github_client = None
self.repo_obj = None
self.cached_release = None
self.github_init()
self.mqtt_client = MQTTClient(client_id=self.config['mqtt']['id'])
self.mqtt_init()
self.threads = {} # ip: thread
self.server_thread = None
self.server_loop = False
def loop(self):
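# One polling cycle: wait until no per-device update threads are alive, check GitHub for a new release tag,
# and when a new tag is found download its files and (re)start the server thread, then sleep for check_rate seconds.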
while True:
try:
while True:
next(thread for thread in self.threads.values() if thread.is_alive())
sleep(1)
except StopIteration:
pass
tag, files = self.check_repo()
if tag is not None:
files = [file for file in files if ".mpy" in file.name]  # Download compiled files only
self._download_files(files)
update_json = self._update_json(files=files, tag=tag)
self.start_server(files, update_json)
sleep(int(self.config['github']['check_rate']))
def server(self, files, update_json):
while self.server_loop:
logger.debug('Server waiting for installed tag...')
topic = self.config['mqtt']['installed_tags_topic']
broker = self.config['mqtt']['broker']
message = subscribe(topic, hostname=broker)
payload = message.payload
msg_str = payload.decode("utf-8")
try:
installed_tag_json = json.loads(msg_str)
if 'ip' not in installed_tag_json or 'tag' not in installed_tag_json:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
except:
logger.warning('Server received a malformed installed tag message, skipping it...')
continue
logger.debug(f'New update installed tag from {installed_tag_json["ip"]}')
latest_tag = json.loads(update_json)['tag']  # update_json is the JSON string built by _update_json
if installed_tag_json['tag'] != latest_tag:
logger.debug(f"Probe out of date: installed {installed_tag_json['tag']}, latest {latest_tag}")
self.spawn_update_thread(installed_tag_json['ip'], files, update_json)
def spawn_update_thread(self, ip: str, files, update_json):
logger.debug(f'Spawning new thread for {ip} update...')
broker = self.config['mqtt']['broker']
topic = self.config['mqtt']['installed_tags_topic']
port = self.config['updates']['port']
th = DeviceUpdater(ip, port=port, files=files, broker=broker, installed_tags_topic=topic, release_json=update_json, mqtt_client=self.mqtt_client)
th.start()
self.threads[ip] = th
logger.debug(f'Thread spawned and registered.')
def mqtt_wait_publish(self, message: MQTTMessageInfo, timeout=1):
start = time()
t_out = timeout  # time() returns seconds, so the timeout needs no millisecond conversion
while not message.is_published() and time()-start < t_out:
sleep(0.1)
if message.is_published():
return True
return False
def start_server(self, files, update_json):
logger.debug('Starting update server...')
self.server_loop = False
if self.server_thread is not None:
self.server_thread.join()
self.server_loop = True
self.server_thread = Thread(target=self.server, args=(files, update_json))
self.server_thread.start()
logger.debug('Update server started.')
def mqtt_init(self):
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_connect()
self.mqtt_client.loop_start()
def mqtt_connect(self):
broker = self.config['mqtt']['broker']
self.mqtt_client.connect(broker)
def mqtt_on_connect(self, client, userdata, flags, rc) -> bool:
if rc == 0:
logger.debug(f'MQTT client connected to {self.config["mqtt"]["broker"]}')
return True
else:
logger.error(f'Connection to the broker failed, response: {rc}')
return False
def mqtt_on_disconnect(self, *args):
logger.warning(f'MQTT client disconnected from the broker')
self.mqtt_client.reconnect()
def read_configuration(self, config_path):
logger.debug(f'Reading configuration file "{config_path}"')
try:
self.config.read(config_path)
except Exception as e:
logger.critical(f'Error reading configuration file; {e}')
logger.critical('Closing...')
exit(1)
try:
sections = self.config.sections()
for section in CONFIGURATION_LAYOUT:
assert section in sections
for key in CONFIGURATION_LAYOUT[section]:
assert key in self.config[section]
except AssertionError:
logger.critical(f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...')
for section in CONFIGURATION_LAYOUT:
self.config[section] = {}
for key in CONFIGURATION_LAYOUT[section]:
self.config[section][key] = f'<{key}>'
try:
if os.path.isfile(DEFAULT_COMPLETE_CONFIG_PATH):
logger.error("Can't create configuration sample, please provide a custom configuration file")
exit(1)
with open(DEFAULT_COMPLETE_CONFIG_PATH, 'w') as file:
self.config.write(file)
except Exception as e:
logger.critical(f"Can't create a config sample as '{DEFAULT_COMPLETE_CONFIG_PATH}' in working directory; {e}")
finally:
exit(1)
logger.info(f'Configuration loaded: \n'
f'\tToken: {self.config["github"]["token"]}\n'
f'\tLogs path: {self.config["logging"]["logs_path"]}')
def github_init(self):
logger.debug('Initializing github attributes...')
github = Github(self.config['github']['token'])
self.github_client = github.get_user()
self.repo_obj = self.github_client.get_repo(self.config['github']['repo'])
self.load_cached_release()
logger.debug('Github attributes initialized.')
def load_cached_release(self):
cache_path = self.config['github']['release_cache_complete_path']
logger.debug(f'Loading cached release from {cache_path}')
try:
with open(cache_path, 'r') as file:
self.cached_release = file.readline().strip()
logger.debug(f'Cached release: {self.cached_release}')
except Exception as e:
logger.error(f"Can't load cached release, 'default' tag will be used; {e}")
self.cached_release = 'default'
def save_cached_release(self, tag):
release_cache_path = self.config["github"]["release_cache_complete_path"]
logger.debug(f'Saving cached release in {release_cache_path}')
self.cached_release = tag
try:
with open(release_cache_path, 'w') as file:
file.write(self.cached_release)
logger.debug(f'Cached release saved.')
except Exception as e:
logger.error(f"Can't save cached release")
def check_repo(self): # returns: latest_tag, files
logger.debug(f'Checking "{self.config["github"]["repo"]}" latest release tag')
try:
latest_release = self.repo_obj.get_latest_release()
except:
logger.error(f"Can't get latest release")
return None, None
tag = latest_release.tag_name
if self.cached_release != tag:
logger.info(f"New update found: {tag}")
contents = self.repo_obj.get_contents(path='', ref=tag)
files = [RepoFile(file.name, file.sha, file.download_url) for file in contents]
self.save_cached_release(tag)
return tag, files
else:
return None, None
def _clean_download_folder(self):
download_path = self.config['updates']['download_path']
logger.debug(f'Cleaning download folder "{download_path}"...')
if not os.path.isdir(download_path):
logger.warning(f'Download folder "{download_path}" does not exist, creating it...')
os.mkdir(download_path)
logger.debug('Download folder ready.')
return
files = [os.path.join(download_path, file) for file in os.listdir(download_path)]
logger.debug(f'{len(files)} files will be deleted..')
for idx, file in enumerate(files):
logger.debug(f'[{idx}/{len(files)-1}] Deleting {file}')
if os.path.isfile(file):
os.remove(file)
else:
os.rmdir(file)
if len(os.listdir(download_path)) > 0:
logger.error("Can't clean download folder")
exit(1)
logger.debug('Download folder ready.')
def _download_files(self, files) -> bool:
download_path = self.config['updates']['download_path']
logger.debug(f'Downloading {len(files)} files in {download_path}...')
self._clean_download_folder()
trusted_files = self.config['trusted_files']['files'].split(',')
trusted_files = [file.strip() for file in trusted_files]
files_threads = [FileDownloader(file, download_path, file.name in trusted_files) for file in files]
for thread in files_threads:
thread.start()
for thread in files_threads:
thread.join()
if not thread.response:
logger.error(f"Error downloading {thread.file.name}, aborting files download")
for th in files_threads:
if th.is_alive():
th.kill_thread()
return False
logger.debug('Files downloaded')
return True
def remove_empty_lines(self, file: RepoFile) -> (str, bool):
with open(file.path, 'r') as file_opened:
content = file_opened.read()
#print(content)
content = content.rstrip()
with open(file.path, 'w') as file_opened:
file_opened.write(content)
def _update_json(self, tag, files):
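# Returns a JSON string with the release tag and the list of file names; this is the payload that DeviceUpdater publishes to each probe.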
msg = {}
msg['tag'] = tag
msg['files'] = [file.name for file in files]
msg_json = json.dumps(msg)
return msg_json
updater = MicroUpdater()
updater.loop()
| _download_file | identifier_name |
WaterHeaterClass.py | #Definition of Water Heater Class and Fuel Type
from scipy.stats import weibull_min
from Inputs_Energy import *
from RefrigerantCalc import Refrigerant
#a= 0.5 #shape
#b = 10 # scale
#Years beyond the mean lifetime after which all WHs are assumed fully retired
#y = range(0,21)
#r= weibull_min.cdf(y,3,loc=0,scale = b)
#print (r)
UltimYr = 15
class WaterHeater:
NPV = 0
dailyVol = 50 #gallons
# IncTemp = 75 #temp increase in Fahr
# DiscRate = 0.04
CCDiscRate = 0.04 #carbon price discount rate
Inflation = 0.0
def __init__(self,name, fuel, ef, vintage, OrigNum, lt, IC, OM, hasRefrigerant, refrigerant = Refrigerant(), IncTemp = 75):
self.name = name
self.fuel = fuel
self.ef = ef #Efficiency
self.vintage = vintage #year of installation..typically assumed happens beginning of a year
self.OrigNum = OrigNum #Original Num is the number of waterheaters created in the 'vintage'year
self.lt = lt #lifetime
self.IC = IC #Initial Cost could be just Capex or could be Capex+initial cost to build infrastructure
self.OM = OM #Operations and Maintenance
self.hasRefrigerant = hasRefrigerant
self.refrigerant = refrigerant
self.IncTemp = IncTemp
# self.calcNPV()
#computes the total annual leakage from all the self.OrigNum appliances during their lifetime
def RefLeaks(self, yr ):
result = {}
if (self.hasRefrigerant == True):
leakages = self.refrigerant.RefLeakage(yr, yr+self.lt)
for i in range( yr, yr + self.lt+1):
result[i]=leakages[i]
return result
def AvgRefLeaks(self,yr): #in tons of CO2 eq
result = {}
avgleak = 0
if (self.hasRefrigerant == True):
result = self.RefLeaks(yr)
#for i in range(vint, vint+ self.lt):
# avgleak = avgleak + result[i]/(1+CCDiscRate)**(i-vint+1)
avgleak = sum(result.values())/self.lt
else:
avgleak = 0
return avgleak
def AnnEmissions(self,yr): #in tons with NO REFRIGERANT
return self.AnnualEngUsage() * self.fuel.UnitEmissions[yr]/1000
def AnnualEmissions(self,yr): #in tons with REFRIGERANTS
if self.hasRefrigerant == False:
return self.AnnEmissions(yr)
else:
return ( self.AnnEmissions(yr)+ self.AvgRefLeaks(yr) )
def annualizedEmissions(self, vint): #in tons (average emissions, not discounted)
result = {}
# result1 = {}
for i in range(vint, vint+self.lt):
result[i] = self.AnnualEmissions(i) #INCLUDING DIRECT AND INDIRECT
# result1[yr] = self.AnnEmissions(yr)
annEmis = sum(result.values())/self.lt
return (annEmis)
def MarginalAnnualEmissions(self, WH2, yr):
return (self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr) )
def annualCarbonCost(self, vint, UnitCarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
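# NOTE: lcc is defined a second time further down with the same body; in Python the later definition overrides this one.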
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): # can specify a different lifetime than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
|
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
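# Annual energy demand: dailyVol gallons/day x IncTemp (deg F rise) x fuel.unitEng x 365 days, divided by efficiency;
# unitEng is presumably the energy needed to raise one gallon by one degree (defined in Inputs_Energy, not shown here).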
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2, yr):
return(self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr))
def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
def weib(self):
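# Cumulative retirements by age: Weibull CDF with shape 3, location offset 2.5 years and scale equal to the rated lifetime, scaled by the original stock size.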
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
return self.weib()[yr-self.vintage]
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
#WH in any year (sum over all vintages)
class WH_Aggregates:
def __init__(self, name):
self.name=name
self.AnnAggStock = {}
self.AnnAggEnergy = {}
self.AnnAggEmissions = {}
Ref1 = Refrigerant(2000, 1, 0.005, 0.1, 0.3)
Ref2 = Refrigerant(675, 1, 0.005, 0.1, 0.3)
Ref3 = Refrigerant(1, 1, 0.005, 0.1, 0.3)
Stck = 100
Time = 2016
NGGG = WaterHeater('NG_WH1', NG, NG0_EF, Time, Stck, NG_LT, NGIC, OM_NG, False)
INGGG = WaterHeater('ING_WH', NG, ING_EF, Time, Stck, ING_LT, INGIC, OM_ING, False)
EWH = WaterHeater('E_WH', Elec, E_EF, Time, Stck, EL_LT, EWHIC, OM_EL, False)
PWH = WaterHeater('Prop_WH', Prop, Prop_EF, Time, Stck, Prop_LT, PropIC, OM_Prop, False)
HPPP = WaterHeater('HP_WH1', Elec, HP1_EF, Time, Stck, HP_LT, HPIC, OM_HP, True, Ref1)
HPP2 = WaterHeater('HP_WH1', Elec, HP2_EF, Time, Stck, HP_LT, HPIC2, OM_HP, True, Ref2)
HPP3 = WaterHeater('HP_WH1', Elec, HP3_EF, Time, Stck, HP_LT, HPIC3, OM_HP, True, Ref3)
#STHER = WaterHeater('ST_El', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThCapex, OM_ST, False)
STH = WaterHeater('ST_EL', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThERIC, OM_ST, False)
STHHP = WaterHeater('ST_HP1', Elec, HP1_EF/(1-.6), Time ,Stck, ST_LT, SThHPIC, OM_ST, True, Ref1)
#print ".............."
#print "EMissions solar, HP", STHHP.AvgRefLeaks(Time), HPPP.AvgRefLeaks(Time)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2031), HPPP.AvgRefLeaks(2031)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2046), HPPP.AvgRefLeaks(2046)
#print "Emis", NGGG.AnnualEmissions(Time), INGGG.AnnualEmissions(Time), HPPP.AnnualEmissions(yr), STH.AnnualEmissions(yr)
#print "Eng Usage NG, EWH, Prop", NGGG.AnnualEngUsage(),EWH.AnnualEngUsage(),PWH.AnnualEngUsage()
#print "Annual Emissions NG, EWH, Prop", NGGG.AnnualEmissions(Time),EWH.AnnualEmissions(Time), PWH.AnnualEmissions(Time)
#print "Ann Emissions NG, EWH, Prop", NGGG.AnnEmissions(Time),EWH.AnnEmissions(Time), PWH.AnnEmissions(Time)
#print "\n"
yr = Time
CarbonCost = 100
#print "TT", HPP3.AnnEmissions(yr), HPP3.AvgRefLeaks(yr), EWH.AnnEmissions(yr)
#print NGGG.AnnualEmissions(yr),HPPP.AnnualEmissions(yr), HPP2.AnnualEmissions(yr), HPP3.AnnualEmissions(yr)
#print "HELLO", EWH.annualizedEmissions(Time),HPPP.annualizedEmissions(Time),HPP2.annualizedEmissions(Time),STH.annualizedEmissions(Time),STHHP.annualizedEmissions(Time)
#print "HEllo Agian", EWH.AnnEmissions(2016),EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2022), HPPP.AvgRefLeaks(2022),STHHP.AnnEmissions(2016),STHHP.AvgRefLeaks(2016)
#print "++", EWH.AnnEmissions(2016)+ EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2016)+ HPPP.AvgRefLeaks(2016),HPP2.AnnEmissions(2016)+HPP2.AvgRefLeaks(2016), STHHP.AnnEmissions(2016)+STHHP.AvgRefLeaks(2016)
#print "AnnualizedEmissions", Time,HPPP.annualizedEmissions(Time), EWH.annualizedEmissions(Time), EWH.AnnEmissions(Time),
#print "AnnualEng", Time, NGGG.AnnualEngUsage(),HPPP.AnnualEngUsage(), EWH.AnnualEngUsage()
#print "AnnualCarbonCost", Time, EWH.annualCarbonCost(Time,50), HPPP.annualCarbonCost(Time,50)
#print "Solar", STHHP.deadsofar(2025), STHHP.numAlive(2025)
#print "LCC", Time, NGGG.lcc(Time,0),INGGG.lcc(Time, 0), EWH.lcc(Time,0),HPPP.lcc(Time,0), STHHP.lcc(Time,0)
#print "I payback w.r.t NG", yr, NGGG.payback(INGGG,yr), NGGG.payback(EWH,yr), NGGG.payback(HPPP,yr), NGGG.payback(STH,yr)
#print "II payback w.r.t NG", yr, NGGG.payback1(INGGG,yr), NGGG.payback1(EWH,yr), NGGG.payback1(HPPP,yr), NGGG.payback1(STH,yr)
#print "II payback w.r.t EWH", yr, EWH.payback(NGGG,yr), EWH.payback(INGGG,yr), EWH.payback(HPPP,yr), EWH.payback(STHHP,yr)
#print "III payback w.r.t EWH", yr, HPPP.payback(STHHP,yr), HPPP.payback(EWH,yr)
#print "annualized LCC", Time, NGGG.lcc(Time,0)/NGGG.lt, INGGG.lcc(Time, 0)/ INGGG.lt , EWH.lcc(Time,0)/EWH.lt, HPPP.lcc(Time,0)/HPPP.lt, STH.lcc(Time,0)/STH.lt
#print 'payback', Time, NGGG.payback1(INGGG,Time),INGGG.payback1(NGGG,Time), EWH.payback1(NGGG,Time), NGGG.payback1(EWH, Time), INGGG.payback1(HPPP,Time), EWH.payback1(HPPP,Time), INGGG.payback1(STH,Time),STH.payback1(INGGG,Time)
#print 'payback orig', Time, NGGG.payback(INGGG,Time),INGGG.payback(NGGG,Time), EWH.payback(NGGG,Time), NGGG.payback(EWH,Time), INGGG.payback(HPPP,Time), EWH.payback(HPPP,Time), INGGG.payback(STH,Time), STH.payback(INGGG,Time)
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import *
#import matplotlib.patches as mpatches
#fig = plt.figure(figsize=(10.0, 8.0))
#axes1 = fig.add_subplot(1,1, 1)
#p1 =[]
#p2 = []
#for yr in range (2016,2060):
# print yr, round( NGGG.numAlive(yr),2), round(INGGG.numAlive(yr),2), round(PWH.numAlive(yr),2), round(HPPP.numAlive(yr),2)
# plt.hold(True)
# s1 = axes1.scatter(yr-2016, INGGG.numAlive(yr), color = 'r')
# s1 = axes1.scatter(yr-2016, HPPP.numAlive(yr), color = 'g')
# s1 = axes1.scatter(yr-2016, STH.numAlive(yr), color = 'b')
# p1.append([s1])
#axes1.legend([mpatches.Patch(color='r'), mpatches.Patch(color='g'), mpatches.Patch(color = 'b')], ['Instantaneous NG','HeatPump/NG Storage', 'SolarThermal'], loc = 1, fontsize = 10 )
#axes1.axis([0,40, 0, 110])
#fig.tight_layout()
#plt.show()
| return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt) | identifier_body |
WaterHeaterClass.py | #Definition of Water Heater Class and Fuel Type
from scipy.stats import weibull_min
from Inputs_Energy import *
from RefrigerantCalc import Refrigerant
#a= 0.5 #shape
#b = 10 # scale
#Years of MeanLifeTIme, when all WHs are absolutely killed
#y = range(0,21)
#r= weibull_min.cdf(y,3,loc=0,scale = b)
#print (r)
UltimYr = 15
class WaterHeater:
NPV = 0
dailyVol = 50 #gallons
# IncTemp = 75 #temp increase in Fahr
# DiscRate = 0.04
CCDiscRate = 0.04 #carbon price discount rate
Inflation = 0.0
def __init__(self,name, fuel, ef, vintage, OrigNum, lt, IC, OM, hasRefrigerant, refrigerant = Refrigerant(), IncTemp = 75):
self.name = name
self.fuel = fuel
self.ef = ef #Efficiency
self.vintage = vintage #year of installation..typically assumed happens beginning of a year
self.OrigNum = OrigNum #Original Num is the number of waterheaters created in the 'vintage'year
self.lt = lt #lifetime
self.IC = IC #Initial Cost could be just Capex or could be Capex+initial cost to build infrastructure
self.OM = OM #Operations and Maintenance
self.hasRefrigerant = hasRefrigerant
self.refrigerant = refrigerant
self.IncTemp = IncTemp
# self.calcNPV()
#computes the total annual leakage from all the self.OrigNum appliances during their lifetime
def RefLeaks(self, yr ):
result = {}
if (self.hasRefrigerant == True):
leakages = self.refrigerant.RefLeakage(yr, yr+self.lt)
for i in range( yr, yr + self.lt+1):
result[i]=leakages[i]
return result
def AvgRefLeaks(self,yr): #in tons of CO2 eq
result = {}
avgleak = 0
if (self.hasRefrigerant == True):
result = self.RefLeaks(yr)
#for i in range(vint, vint+ self.lt):
# avgleak = avgleak + result[i]/(1+CCDiscRate)**(i-vint+1)
avgleak = sum(result.values())/self.lt
else:
avgleak = 0
return avgleak
def AnnEmissions(self,yr): #in tons with NO REFRIGERANT
return self.AnnualEngUsage() * self.fuel.UnitEmissions[yr]/1000
def AnnualEmissions(self,yr): #in tons with REFRIGERANTS
if self.hasRefrigerant == False:
return self.AnnEmissions(yr)
else:
return ( self.AnnEmissions(yr)+ self.AvgRefLeaks(yr) )
def annualizedEmissions(self, vint): #in tons (average emissions, not discounted)
result = {}
# result1 = {}
for i in range(vint, vint+self.lt):
result[i] = self.AnnualEmissions(i) #INCLUDING DIRECT AND INDIRECT
# result1[yr] = self.AnnEmissions(yr)
annEmis = sum(result.values())/self.lt
return (annEmis)
def MarginalAnnualEmissions(self, WH2, yr):
return (self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr) )
def annualCarbonCost(self, vint, UnitCarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
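# NOTE: lcc is defined a second time further down with the same body; in Python the later definition overrides this one.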
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): # can specify a different lifetime than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt)
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
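# Annual energy demand: dailyVol gallons/day x IncTemp (deg F rise) x fuel.unitEng x 365 days, divided by efficiency;
# unitEng is presumably the energy needed to raise one gallon by one degree (defined in Inputs_Energy, not shown here).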
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2, yr):
return(self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr))
def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
def weib(self):
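# Cumulative retirements by age: Weibull CDF with shape 3, location offset 2.5 years and scale equal to the rated lifetime, scaled by the original stock size.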
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
|
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
#WH in any year (sum over all vintages)
class WH_Aggregates:
def __init__(self, name):
self.name=name
self.AnnAggStock = {}
self.AnnAggEnergy = {}
self.AnnAggEmissions = {}
Ref1 = Refrigerant(2000, 1, 0.005, 0.1, 0.3)
Ref2 = Refrigerant(675, 1, 0.005, 0.1, 0.3)
Ref3 = Refrigerant(1, 1, 0.005, 0.1, 0.3)
Stck = 100
Time = 2016
NGGG = WaterHeater('NG_WH1', NG, NG0_EF, Time, Stck, NG_LT, NGIC, OM_NG, False)
INGGG = WaterHeater('ING_WH', NG, ING_EF, Time, Stck, ING_LT, INGIC, OM_ING, False)
EWH = WaterHeater('E_WH', Elec, E_EF, Time, Stck, EL_LT, EWHIC, OM_EL, False)
PWH = WaterHeater('Prop_WH', Prop, Prop_EF, Time, Stck, Prop_LT, PropIC, OM_Prop, False)
HPPP = WaterHeater('HP_WH1', Elec, HP1_EF, Time, Stck, HP_LT, HPIC, OM_HP, True, Ref1)
HPP2 = WaterHeater('HP_WH1', Elec, HP2_EF, Time, Stck, HP_LT, HPIC2, OM_HP, True, Ref2)
HPP3 = WaterHeater('HP_WH1', Elec, HP3_EF, Time, Stck, HP_LT, HPIC3, OM_HP, True, Ref3)
#STHER = WaterHeater('ST_El', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThCapex, OM_ST, False)
STH = WaterHeater('ST_EL', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThERIC, OM_ST, False)
STHHP = WaterHeater('ST_HP1', Elec, HP1_EF/(1-.6), Time ,Stck, ST_LT, SThHPIC, OM_ST, True, Ref1)
#print ".............."
#print "EMissions solar, HP", STHHP.AvgRefLeaks(Time), HPPP.AvgRefLeaks(Time)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2031), HPPP.AvgRefLeaks(2031)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2046), HPPP.AvgRefLeaks(2046)
#print "Emis", NGGG.AnnualEmissions(Time), INGGG.AnnualEmissions(Time), HPPP.AnnualEmissions(yr), STH.AnnualEmissions(yr)
#print "Eng Usage NG, EWH, Prop", NGGG.AnnualEngUsage(),EWH.AnnualEngUsage(),PWH.AnnualEngUsage()
#print "Annual Emissions NG, EWH, Prop", NGGG.AnnualEmissions(Time),EWH.AnnualEmissions(Time), PWH.AnnualEmissions(Time)
#print "Ann Emissions NG, EWH, Prop", NGGG.AnnEmissions(Time),EWH.AnnEmissions(Time), PWH.AnnEmissions(Time)
#print "\n"
yr = Time
CarbonCost = 100
#print "TT", HPP3.AnnEmissions(yr), HPP3.AvgRefLeaks(yr), EWH.AnnEmissions(yr)
#print NGGG.AnnualEmissions(yr),HPPP.AnnualEmissions(yr), HPP2.AnnualEmissions(yr), HPP3.AnnualEmissions(yr)
#print "HELLO", EWH.annualizedEmissions(Time),HPPP.annualizedEmissions(Time),HPP2.annualizedEmissions(Time),STH.annualizedEmissions(Time),STHHP.annualizedEmissions(Time)
#print "HEllo Agian", EWH.AnnEmissions(2016),EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2022), HPPP.AvgRefLeaks(2022),STHHP.AnnEmissions(2016),STHHP.AvgRefLeaks(2016)
#print "++", EWH.AnnEmissions(2016)+ EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2016)+ HPPP.AvgRefLeaks(2016),HPP2.AnnEmissions(2016)+HPP2.AvgRefLeaks(2016), STHHP.AnnEmissions(2016)+STHHP.AvgRefLeaks(2016)
#print "AnnualizedEmissions", Time,HPPP.annualizedEmissions(Time), EWH.annualizedEmissions(Time), EWH.AnnEmissions(Time),
#print "AnnualEng", Time, NGGG.AnnualEngUsage(),HPPP.AnnualEngUsage(), EWH.AnnualEngUsage()
#print "AnnualCarbonCost", Time, EWH.annualCarbonCost(Time,50), HPPP.annualCarbonCost(Time,50)
#print "Solar", STHHP.deadsofar(2025), STHHP.numAlive(2025)
#print "LCC", Time, NGGG.lcc(Time,0),INGGG.lcc(Time, 0), EWH.lcc(Time,0),HPPP.lcc(Time,0), STHHP.lcc(Time,0)
#print "I payback w.r.t NG", yr, NGGG.payback(INGGG,yr), NGGG.payback(EWH,yr), NGGG.payback(HPPP,yr), NGGG.payback(STH,yr)
#print "II payback w.r.t NG", yr, NGGG.payback1(INGGG,yr), NGGG.payback1(EWH,yr), NGGG.payback1(HPPP,yr), NGGG.payback1(STH,yr)
#print "II payback w.r.t EWH", yr, EWH.payback(NGGG,yr), EWH.payback(INGGG,yr), EWH.payback(HPPP,yr), EWH.payback(STHHP,yr)
#print "III payback w.r.t EWH", yr, HPPP.payback(STHHP,yr), HPPP.payback(EWH,yr)
#print "annualized LCC", Time, NGGG.lcc(Time,0)/NGGG.lt, INGGG.lcc(Time, 0)/ INGGG.lt , EWH.lcc(Time,0)/EWH.lt, HPPP.lcc(Time,0)/HPPP.lt, STH.lcc(Time,0)/STH.lt
#print 'payback', Time, NGGG.payback1(INGGG,Time),INGGG.payback1(NGGG,Time), EWH.payback1(NGGG,Time), NGGG.payback1(EWH, Time), INGGG.payback1(HPPP,Time), EWH.payback1(HPPP,Time), INGGG.payback1(STH,Time),STH.payback1(INGGG,Time)
#print 'payback orig', Time, NGGG.payback(INGGG,Time),INGGG.payback(NGGG,Time), EWH.payback(NGGG,Time), NGGG.payback(EWH,Time), INGGG.payback(HPPP,Time), EWH.payback(HPPP,Time), INGGG.payback(STH,Time), STH.payback(INGGG,Time)
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import *
#import matplotlib.patches as mpatches
#fig = plt.figure(figsize=(10.0, 8.0))
#axes1 = fig.add_subplot(1,1, 1)
#p1 =[]
#p2 = []
#for yr in range (2016,2060):
# print yr, round( NGGG.numAlive(yr),2), round(INGGG.numAlive(yr),2), round(PWH.numAlive(yr),2), round(HPPP.numAlive(yr),2)
# plt.hold(True)
# s1 = axes1.scatter(yr-2016, INGGG.numAlive(yr), color = 'r')
# s1 = axes1.scatter(yr-2016, HPPP.numAlive(yr), color = 'g')
# s1 = axes1.scatter(yr-2016, STH.numAlive(yr), color = 'b')
# p1.append([s1])
#axes1.legend([mpatches.Patch(color='r'), mpatches.Patch(color='g'), mpatches.Patch(color = 'b')], ['Instantaneous NG','HeatPump/NG Storage', 'SolarThermal'], loc = 1, fontsize = 10 )
#axes1.axis([0,40, 0, 110])
#fig.tight_layout()
#plt.show()
| return self.weib()[yr-self.vintage] | conditional_block |
WaterHeaterClass.py | #Definition of Water Heater Class and Fuel Type
from scipy.stats import weibull_min
from Inputs_Energy import *
from RefrigerantCalc import Refrigerant
#a= 0.5 #shape
#b = 10 # scale
#Years of MeanLifeTIme, when all WHs are absolutely killed
#y = range(0,21)
#r= weibull_min.cdf(y,3,loc=0,scale = b)
#print (r)
UltimYr = 15
class WaterHeater:
NPV = 0
dailyVol = 50 #gallons
# IncTemp = 75 #temp increase in Fahr
# DiscRate = 0.04
CCDiscRate = 0.04 #carbon price discount rate
Inflation = 0.0
def __init__(self,name, fuel, ef, vintage, OrigNum, lt, IC, OM, hasRefrigerant, refrigerant = Refrigerant(), IncTemp = 75):
self.name = name
self.fuel = fuel
self.ef = ef #Efficiency
self.vintage = vintage #year of installation..typically assumed happens beginning of a year
self.OrigNum = OrigNum #Original Num is the number of waterheaters created in the 'vintage'year
self.lt = lt #lifetime
self.IC = IC #Initial Cost could be just Capex or could be Capex+initial cost to build infrastructure
self.OM = OM #Operations and Maintenance
self.hasRefrigerant = hasRefrigerant
self.refrigerant = refrigerant
self.IncTemp = IncTemp
# self.calcNPV()
#computes the total annual leakage from all the (self.Num appliances during their life
def RefLeaks(self, yr ):
result = {}
if (self.hasRefrigerant == True):
leakages = self.refrigerant.RefLeakage(yr, yr+self.lt)
for i in range( yr, yr + self.lt+1):
result[i]=leakages[i]
return result
def AvgRefLeaks(self,yr): #in tons of CO2 eq
result = {}
avgleak = 0
if (self.hasRefrigerant == True):
result = self.RefLeaks(yr)
#for i in range(vint, vint+ self.lt):
# avgleak = avgleak + result[i]/(1+CCDiscRate)**(i-vint+1)
avgleak = sum(result.values())/self.lt
else:
avgleak = 0
return avgleak
def AnnEmissions(self,yr): #in tons with NO REFRIGERANT
return self.AnnualEngUsage() * self.fuel.UnitEmissions[yr]/1000
def AnnualEmissions(self,yr): #in tons with REFRIGERANTS
if self.hasRefrigerant == False:
return self.AnnEmissions(yr)
else:
return ( self.AnnEmissions(yr)+ self.AvgRefLeaks(yr) )
def annualizedEmissions(self, vint): #in tons (THIS IS THE AVERAGE EMISSIONS..NOT DISCOUNTED
result = {}
# result1 = {}
for i in range(vint, vint+self.lt):
result[i] = self.AnnualEmissions(i) #INCLUDING DIRECT AND INDIRECT
# result1[yr] = self.AnnEmissions(yr)
annEmis = sum(result.values())/self.lt
return (annEmis)
def MarginalAnnualEmissions(self, WH2, yr):
return (self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr) )
def annualCarbonCost(self, vint, UnitCarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): #changing can specify a difff lifetime other than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt)
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2):
return(self.AnnualEmissions() - WH2.AnnualEmissions())
| def weib(self):
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
return self.weib()[yr-self.vintage]
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
#WH in any year (sum over all vintages)
class WH_Aggregates:
def __init__(self, name):
self.name=name
self.AnnAggStock = {}
self.AnnAggEnergy = {}
self.AnnAggEmissions = {}
Ref1 = Refrigerant(2000, 1, 0.005, 0.1, 0.3)
Ref2 = Refrigerant(675, 1, 0.005, 0.1, 0.3)
Ref3 = Refrigerant(1, 1, 0.005, 0.1, 0.3)
Stck = 100
Time = 2016
NGGG = WaterHeater('NG_WH1', NG, NG0_EF, Time, Stck, NG_LT, NGIC, OM_NG, False)
INGGG = WaterHeater('ING_WH', NG, ING_EF, Time, Stck, ING_LT, INGIC, OM_ING, False)
EWH = WaterHeater('E_WH', Elec, E_EF, Time, Stck, EL_LT, EWHIC, OM_EL, False)
PWH = WaterHeater('Prop_WH', Prop, Prop_EF, Time, Stck, Prop_LT, PropIC, OM_Prop, False)
HPPP = WaterHeater('HP_WH1', Elec, HP1_EF, Time, Stck, HP_LT, HPIC, OM_HP, True, Ref1)
HPP2 = WaterHeater('HP_WH1', Elec, HP2_EF, Time, Stck, HP_LT, HPIC2, OM_HP, True, Ref2)
HPP3 = WaterHeater('HP_WH1', Elec, HP3_EF, Time, Stck, HP_LT, HPIC3, OM_HP, True, Ref3)
#STHER = WaterHeater('ST_El', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThCapex, OM_ST, False)
STH = WaterHeater('ST_EL', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThERIC, OM_ST, False)
STHHP = WaterHeater('ST_HP1', Elec, HP1_EF/(1-.6), Time ,Stck, ST_LT, SThHPIC, OM_ST, True, Ref1)
#print ".............."
#print "EMissions solar, HP", STHHP.AvgRefLeaks(Time), HPPP.AvgRefLeaks(Time)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2031), HPPP.AvgRefLeaks(2031)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2046), HPPP.AvgRefLeaks(2046)
#print "Emis", NGGG.AnnualEmissions(Time), INGGG.AnnualEmissions(Time), HPPP.AnnualEmissions(yr), STH.AnnualEmissions(yr)
#print "Eng Usage NG, EWH, Prop", NGGG.AnnualEngUsage(),EWH.AnnualEngUsage(),PWH.AnnualEngUsage()
#print "Annual Emissions NG, EWH, Prop", NGGG.AnnualEmissions(Time),EWH.AnnualEmissions(Time), PWH.AnnualEmissions(Time)
#print "Ann Emissions NG, EWH, Prop", NGGG.AnnEmissions(Time),EWH.AnnEmissions(Time), PWH.AnnEmissions(Time)
#print "\n"
yr = Time
CarbonCost = 100
#print "TT", HPP3.AnnEmissions(yr), HPP3.AvgRefLeaks(yr), EWH.AnnEmissions(yr)
#print NGGG.AnnualEmissions(yr),HPPP.AnnualEmissions(yr), HPP2.AnnualEmissions(yr), HPP3.AnnualEmissions(yr)
#print "HELLO", EWH.annualizedEmissions(Time),HPPP.annualizedEmissions(Time),HPP2.annualizedEmissions(Time),STH.annualizedEmissions(Time),STHHP.annualizedEmissions(Time)
#print "HEllo Agian", EWH.AnnEmissions(2016),EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2022), HPPP.AvgRefLeaks(2022),STHHP.AnnEmissions(2016),STHHP.AvgRefLeaks(2016)
#print "++", EWH.AnnEmissions(2016)+ EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2016)+ HPPP.AvgRefLeaks(2016),HPP2.AnnEmissions(2016)+HPP2.AvgRefLeaks(2016), STHHP.AnnEmissions(2016)+STHHP.AvgRefLeaks(2016)
#print "AnnualizedEmissions", Time,HPPP.annualizedEmissions(Time), EWH.annualizedEmissions(Time), EWH.AnnEmissions(Time),
#print "AnnualEng", Time, NGGG.AnnualEngUsage(),HPPP.AnnualEngUsage(), EWH.AnnualEngUsage()
#print "AnnualCarbonCost", Time, EWH.annualCarbonCost(Time,50), HPPP.annualCarbonCost(Time,50)
#print "Solar", STHHP.deadsofar(2025), STHHP.numAlive(2025)
#print "LCC", Time, NGGG.lcc(Time,0),INGGG.lcc(Time, 0), EWH.lcc(Time,0),HPPP.lcc(Time,0), STHHP.lcc(Time,0)
#print "I payback w.r.t NG", yr, NGGG.payback(INGGG,yr), NGGG.payback(EWH,yr), NGGG.payback(HPPP,yr), NGGG.payback(STH,yr)
#print "II payback w.r.t NG", yr, NGGG.payback1(INGGG,yr), NGGG.payback1(EWH,yr), NGGG.payback1(HPPP,yr), NGGG.payback1(STH,yr)
#print "II payback w.r.t EWH", yr, EWH.payback(NGGG,yr), EWH.payback(INGGG,yr), EWH.payback(HPPP,yr), EWH.payback(STHHP,yr)
#print "III payback w.r.t EWH", yr, HPPP.payback(STHHP,yr), HPPP.payback(EWH,yr)
#print "annualized LCC", Time, NGGG.lcc(Time,0)/NGGG.lt, INGGG.lcc(Time, 0)/ INGGG.lt , EWH.lcc(Time,0)/EWH.lt, HPPP.lcc(Time,0)/HPPP.lt, STH.lcc(Time,0)/STH.lt
#print 'payback', Time, NGGG.payback1(INGGG,Time),INGGG.payback1(NGGG,Time), EWH.payback1(NGGG,Time), NGGG.payback1(EWH, Time), INGGG.payback1(HPPP,Time), EWH.payback1(HPPP,Time), INGGG.payback1(STH,Time),STH.payback1(INGGG,Time)
#print 'payback orig', Time, NGGG.payback(INGGG,Time),INGGG.payback(NGGG,Time), EWH.payback(NGGG,Time), NGGG.payback(EWH,Time), INGGG.payback(HPPP,Time), EWH.payback(HPPP,Time), INGGG.payback(STH,Time), STH.payback(INGGG,Time)
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import *
#import matplotlib.patches as mpatches
#fig = plt.figure(figsize=(10.0, 8.0))
#axes1 = fig.add_subplot(1,1, 1)
#p1 =[]
#p2 = []
#for yr in range (2016,2060):
# print yr, round( NGGG.numAlive(yr),2), round(INGGG.numAlive(yr),2), round(PWH.numAlive(yr),2), round(HPPP.numAlive(yr),2)
# plt.hold(True)
# s1 = axes1.scatter(yr-2016, INGGG.numAlive(yr), color = 'r')
# s1 = axes1.scatter(yr-2016, HPPP.numAlive(yr), color = 'g')
# s1 = axes1.scatter(yr-2016, STH.numAlive(yr), color = 'b')
# p1.append([s1])
#axes1.legend([mpatches.Patch(color='r'), mpatches.Patch(color='g'), mpatches.Patch(color = 'b')], ['Instantaneous NG','HeatPump/NG Storage', 'SolarThermal'], loc = 1, fontsize = 10 )
#axes1.axis([0,40, 0, 110])
#fig.tight_layout()
#plt.show() | def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
| random_line_split |
WaterHeaterClass.py | #Definition of Water Heater Class and Fuel Type
from scipy.stats import weibull_min
from Inputs_Energy import *
from RefrigerantCalc import Refrigerant
#a= 0.5 #shape
#b = 10 # scale
#Years of MeanLifeTIme, when all WHs are absolutely killed
#y = range(0,21)
#r= weibull_min.cdf(y,3,loc=0,scale = b)
#print (r)
UltimYr = 15
class WaterHeater:
NPV = 0
dailyVol = 50 #gallons
# IncTemp = 75 #temp increase in Fahr
# DiscRate = 0.04
CCDiscRate = 0.04 #carbon price discount rate
Inflation = 0.0
def __init__(self,name, fuel, ef, vintage, OrigNum, lt, IC, OM, hasRefrigerant, refrigerant = Refrigerant(), IncTemp = 75):
self.name = name
self.fuel = fuel
self.ef = ef #Efficiency
self.vintage = vintage #year of installation..typically assumed happens beginning of a year
self.OrigNum = OrigNum #Original Num is the number of waterheaters created in the 'vintage'year
self.lt = lt #lifetime
self.IC = IC #Initial Cost could be just Capex or could be Capex+initial cost to build infrastructure
self.OM = OM #Operations and Maintenance
self.hasRefrigerant = hasRefrigerant
self.refrigerant = refrigerant
self.IncTemp = IncTemp
# self.calcNPV()
#computes the total annual leakage from all the (self.Num appliances during their life
def RefLeaks(self, yr ):
result = {}
if (self.hasRefrigerant == True):
leakages = self.refrigerant.RefLeakage(yr, yr+self.lt)
for i in range( yr, yr + self.lt+1):
result[i]=leakages[i]
return result
def | (self,yr): #in tons of CO2 eq
result = {}
avgleak = 0
if (self.hasRefrigerant == True):
result = self.RefLeaks(yr)
#for i in range(vint, vint+ self.lt):
# avgleak = avgleak + result[i]/(1+CCDiscRate)**(i-vint+1)
avgleak = sum(result.values())/self.lt
else:
avgleak = 0
return avgleak
def AnnEmissions(self,yr): #in tons with NO REFRIGERANT
return self.AnnualEngUsage() * self.fuel.UnitEmissions[yr]/1000
def AnnualEmissions(self,yr): #in tons with REFRIGERANTS
if self.hasRefrigerant == False:
return self.AnnEmissions(yr)
else:
return ( self.AnnEmissions(yr)+ self.AvgRefLeaks(yr) )
def annualizedEmissions(self, vint): #in tons (THIS IS THE AVERAGE EMISSIONS..NOT DISCOUNTED
result = {}
# result1 = {}
for i in range(vint, vint+self.lt):
result[i] = self.AnnualEmissions(i) #INCLUDING DIRECT AND INDIRECT
# result1[yr] = self.AnnEmissions(yr)
annEmis = sum(result.values())/self.lt
return (annEmis)
def MarginalAnnualEmissions(self, WH2, yr):
return (self.AnnualEmissions(yr) - WH2.AnnualEmissions(yr) )
def annualCarbonCost(self, vint, UnitCarbonPrice=20): #$20/ton is the default rate for Carbon...if not specified when calling the func
result = {}
for i in range(vint, vint+self.lt):
if self.hasRefrigerant == True:
result[i] = UnitCarbonPrice * (self.AnnualEmissions(i) )
else:
result[i] = UnitCarbonPrice * (self.AnnEmissions(i) )
return result
def averageCarbonCost(self, vint, UnitCarbonPrice=20):
result = {}
result = self.annualCarbonCost(vint, UnitCarbonPrice)
return sum(result.values())/self.lt
def NPVEmissions_Refrigerant(self, yr):
if self.hasRefrigerant == True:
result = 0
RefLeek = self.RefLeaks(yr)
for i in range(yr, yr+self.lt+1):
result = result + RefLeek[i]/(1+DiscRate)**(i-yr+1)
else:
result = 0
return result
def NPVEmissions_Indirect(self, yr):
result = 0
for i in range(yr, yr+self.lt+1):
result = result + self.AnnEmissions(i)/(1+DiscRate)**(i-yr+1)
return result
def NPVEmissions(self, yr): #NPV OF EMISSIONS USED FOR COMPUTING NPV OF CARBONCOST
NPVEm = self.NPVEmissions_Indirect(yr)+ self. NPVEmissions_Refrigerant(yr)
return NPVEm
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def totalCapex(self): #total cost of the stock of vintage yr
return self.OrigNum * self.IC
def NPVCost(self,yr):
NPV = self.IC
for I in range(yr, self.lt+yr):
NPV = NPV + (self.OM[I-yr])/(1+DiscRate)**(I-yr+1)
return NPV
def NPVEngCost(self,yr):
NPV = 0
for I in range(yr, self.lt+yr):
NPV = NPV + (self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def NPVCC(self,vint, CarbonCost= 21): #NPV of carbon cost
return self.NPVEmissions(vint)*CarbonCost
def calcNPV_Capex(self, yr, Capex): #changing capex
NPV = Capex
for I in range(yr,self.lt +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV_LifeTime(self, yr, lifetime): #changing can specify a difff lifetime other than self.lt
NPV = self.IC
for I in range(yr,lifetime +yr):
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def calcNPV(self,yr):#initial fixed capex
NPV = self.IC
for I in range(yr, self.lt+ yr):
# print I, self.OM[I-ThisYear], self.AnnualEngCost(I)
NPV = NPV + (self.OM[I-yr] + self.AnnualEngCost(I))/(1+DiscRate)**(I-yr+1)
return NPV
def annualizedNPV(self,yr):
return self.calcNPV(yr)/self.lt
def lcc(self, yr, UnitCarbonPrice =20 ): #levelized
return (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) )
def Annuallcc(self, yr, UnitCarbonPrice =20 ): #levelized
return ( (self.NPVEmissions(yr)*UnitCarbonPrice + self.calcNPV(yr) ) /self.lt)
def payback(self, WHx,yr):
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
#print "#", X, Y, "#"
if self == WHx:
return 0
elif (X>=0 and Y<=0):
return max(self.lt, WHx.lt)
elif (X<0 and Y>=0):
return 0
else:
return (min(self.lt, WHx.lt, X/(Y) ))
def payback1(self, WHx,yr):
N= 1
maxN = max(self.lt, WHx.lt)
X = WHx.IC - self.IC
Y = (self.OM[0] + self.AnnualEngCost(yr)) - (WHx.OM[0] + WHx.AnnualEngCost(yr))
# print '\n test', X, Y
# if X <= 0 and Y <=0:
# return 0
# else:
while N < maxN and abs(X/Y) >1 :
Y = Y + (self.OM[N] + self.AnnualEngCost(yr+N)) - (WHx.OM[N] + WHx.AnnualEngCost(yr+N))
N = N +1
if N == maxN and X/Y > 1:
return maxN
else:
return N
def AnnualEngUsage(self):
return self.dailyVol* self.IncTemp*self.fuel.unitEng * 365/self.ef
def AnnualEngCost(self, yr):
# if self.fuel == NG:
# print yr, self.fuel.UnitEngCost[inf][yr]
return self.AnnualEngUsage() * self.fuel.UnitEngCost[yr]
def compareEngUsage(self, WH2):
return (self.AnnualEngUsage() - WH2.AnnualEngUsage())
def compareEmissions(self,WH2):
return(self.AnnualEmissions() - WH2.AnnualEmissions())
def CCBreakEven(self, WH2, yr): #breakeven carbon cost
breakeven = (self.calcNPV(yr)/self.lt- WH2.calcNPV(yr)/WH2.lt)/( WH2.NPVEmissions(yr)/WH2.lt - self.NPVEmissions(yr)/self.lt )
return breakeven
def weib(self):
x = range(0, self.lt+UltimYr+1)
w = weibull_min.cdf(x,3,loc=2.5,scale = self.lt) * self.OrigNum
#print w
return(w)
def deadsofar(self, yr):
if yr > self.vintage and yr < self.vintage+ self.lt + UltimYr:
# print yr, self.vintage
return self.weib()[yr-self.vintage]
elif yr >= self.vintage + self.lt + UltimYr:
return self.OrigNum
else:
return 0
def numAlive(self,yr):
return (self.OrigNum - self.deadsofar(yr))
def age(self, yr):
return (yr - self.vintage)
def annualreplacement(self,yr):
# if yr> self.vintage + (self.lt + UltimYr) or yr < self.vintage:
# return 0
# else:
return (max(self.deadsofar(yr)- self.deadsofar(yr-1),0))
class FuelType:
def __init__(self, name,unitEng,UnitEngCost, UnitEmissions):
self.name = name
self.unitEng = unitEng
self.UnitEngCost = UnitEngCost
self.UnitEmissions= UnitEmissions
NG = FuelType("NG", UnitNG , NGCostYrly, NGEmisYrly)
Elec = FuelType("Elec", UnitElec, ElecCostYrly, ElecEmisYrly)
Prop = FuelType("Prop", UnitProp, PropCostYrly, PropEmisYrly)
#for yr in range(ThisYear, EndYear+1):
# print yr, "NGCOST", NGCostYrly['MED'][yr], ElecCostYrly['LOW'][yr], PropCostYrly['LOW'][yr]
#this class is to track the annual 'living' stock of WHs of a particular type, their annual energy and emissions for each
#WH in any year (sum over all vintages)
class WH_Aggregates:
def __init__(self, name):
self.name=name
self.AnnAggStock = {}
self.AnnAggEnergy = {}
self.AnnAggEmissions = {}
Ref1 = Refrigerant(2000, 1, 0.005, 0.1, 0.3)
Ref2 = Refrigerant(675, 1, 0.005, 0.1, 0.3)
Ref3 = Refrigerant(1, 1, 0.005, 0.1, 0.3)
Stck = 100
Time = 2016
NGGG = WaterHeater('NG_WH1', NG, NG0_EF, Time, Stck, NG_LT, NGIC, OM_NG, False)
INGGG = WaterHeater('ING_WH', NG, ING_EF, Time, Stck, ING_LT, INGIC, OM_ING, False)
EWH = WaterHeater('E_WH', Elec, E_EF, Time, Stck, EL_LT, EWHIC, OM_EL, False)
PWH = WaterHeater('Prop_WH', Prop, Prop_EF, Time, Stck, Prop_LT, PropIC, OM_Prop, False)
HPPP = WaterHeater('HP_WH1', Elec, HP1_EF, Time, Stck, HP_LT, HPIC, OM_HP, True, Ref1)
HPP2 = WaterHeater('HP_WH1', Elec, HP2_EF, Time, Stck, HP_LT, HPIC2, OM_HP, True, Ref2)
HPP3 = WaterHeater('HP_WH1', Elec, HP3_EF, Time, Stck, HP_LT, HPIC3, OM_HP, True, Ref3)
#STHER = WaterHeater('ST_El', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThCapex, OM_ST, False)
STH = WaterHeater('ST_EL', Elec, E_EF/(1-.6), Time ,Stck, ST_LT, SThERIC, OM_ST, False)
STHHP = WaterHeater('ST_HP1', Elec, HP1_EF/(1-.6), Time ,Stck, ST_LT, SThHPIC, OM_ST, True, Ref1)
#print ".............."
#print "EMissions solar, HP", STHHP.AvgRefLeaks(Time), HPPP.AvgRefLeaks(Time)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2031), HPPP.AvgRefLeaks(2031)
#print "EMissions solar, HP", STHHP.AvgRefLeaks(2046), HPPP.AvgRefLeaks(2046)
#print "Emis", NGGG.AnnualEmissions(Time), INGGG.AnnualEmissions(Time), HPPP.AnnualEmissions(yr), STH.AnnualEmissions(yr)
#print "Eng Usage NG, EWH, Prop", NGGG.AnnualEngUsage(),EWH.AnnualEngUsage(),PWH.AnnualEngUsage()
#print "Annual Emissions NG, EWH, Prop", NGGG.AnnualEmissions(Time),EWH.AnnualEmissions(Time), PWH.AnnualEmissions(Time)
#print "Ann Emissions NG, EWH, Prop", NGGG.AnnEmissions(Time),EWH.AnnEmissions(Time), PWH.AnnEmissions(Time)
#print "\n"
yr = Time
CarbonCost = 100
#print "TT", HPP3.AnnEmissions(yr), HPP3.AvgRefLeaks(yr), EWH.AnnEmissions(yr)
#print NGGG.AnnualEmissions(yr),HPPP.AnnualEmissions(yr), HPP2.AnnualEmissions(yr), HPP3.AnnualEmissions(yr)
#print "HELLO", EWH.annualizedEmissions(Time),HPPP.annualizedEmissions(Time),HPP2.annualizedEmissions(Time),STH.annualizedEmissions(Time),STHHP.annualizedEmissions(Time)
#print "HEllo Agian", EWH.AnnEmissions(2016),EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2022), HPPP.AvgRefLeaks(2022),STHHP.AnnEmissions(2016),STHHP.AvgRefLeaks(2016)
#print "++", EWH.AnnEmissions(2016)+ EWH.AvgRefLeaks(2016), HPPP.AnnEmissions(2016)+ HPPP.AvgRefLeaks(2016),HPP2.AnnEmissions(2016)+HPP2.AvgRefLeaks(2016), STHHP.AnnEmissions(2016)+STHHP.AvgRefLeaks(2016)
#print "AnnualizedEmissions", Time,HPPP.annualizedEmissions(Time), EWH.annualizedEmissions(Time), EWH.AnnEmissions(Time),
#print "AnnualEng", Time, NGGG.AnnualEngUsage(),HPPP.AnnualEngUsage(), EWH.AnnualEngUsage()
#print "AnnualCarbonCost", Time, EWH.annualCarbonCost(Time,50), HPPP.annualCarbonCost(Time,50)
#print "Solar", STHHP.deadsofar(2025), STHHP.numAlive(2025)
#print "LCC", Time, NGGG.lcc(Time,0),INGGG.lcc(Time, 0), EWH.lcc(Time,0),HPPP.lcc(Time,0), STHHP.lcc(Time,0)
#print "I payback w.r.t NG", yr, NGGG.payback(INGGG,yr), NGGG.payback(EWH,yr), NGGG.payback(HPPP,yr), NGGG.payback(STH,yr)
#print "II payback w.r.t NG", yr, NGGG.payback1(INGGG,yr), NGGG.payback1(EWH,yr), NGGG.payback1(HPPP,yr), NGGG.payback1(STH,yr)
#print "II payback w.r.t EWH", yr, EWH.payback(NGGG,yr), EWH.payback(INGGG,yr), EWH.payback(HPPP,yr), EWH.payback(STHHP,yr)
#print "III payback w.r.t EWH", yr, HPPP.payback(STHHP,yr), HPPP.payback(EWH,yr)
#print "annualized LCC", Time, NGGG.lcc(Time,0)/NGGG.lt, INGGG.lcc(Time, 0)/ INGGG.lt , EWH.lcc(Time,0)/EWH.lt, HPPP.lcc(Time,0)/HPPP.lt, STH.lcc(Time,0)/STH.lt
#print 'payback', Time, NGGG.payback1(INGGG,Time),INGGG.payback1(NGGG,Time), EWH.payback1(NGGG,Time), NGGG.payback1(EWH, Time), INGGG.payback1(HPPP,Time), EWH.payback1(HPPP,Time), INGGG.payback1(STH,Time),STH.payback1(INGGG,Time)
#print 'payback orig', Time, NGGG.payback(INGGG,Time),INGGG.payback(NGGG,Time), EWH.payback(NGGG,Time), NGGG.payback(EWH,Time), INGGG.payback(HPPP,Time), EWH.payback(HPPP,Time), INGGG.payback(STH,Time), STH.payback(INGGG,Time)
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import *
#import matplotlib.patches as mpatches
#fig = plt.figure(figsize=(10.0, 8.0))
#axes1 = fig.add_subplot(1,1, 1)
#p1 =[]
#p2 = []
#for yr in range (2016,2060):
# print yr, round( NGGG.numAlive(yr),2), round(INGGG.numAlive(yr),2), round(PWH.numAlive(yr),2), round(HPPP.numAlive(yr),2)
# plt.hold(True)
# s1 = axes1.scatter(yr-2016, INGGG.numAlive(yr), color = 'r')
# s1 = axes1.scatter(yr-2016, HPPP.numAlive(yr), color = 'g')
# s1 = axes1.scatter(yr-2016, STH.numAlive(yr), color = 'b')
# p1.append([s1])
#axes1.legend([mpatches.Patch(color='r'), mpatches.Patch(color='g'), mpatches.Patch(color = 'b')], ['Instantaneous NG','HeatPump/NG Storage', 'SolarThermal'], loc = 1, fontsize = 10 )
#axes1.axis([0,40, 0, 110])
#fig.tight_layout()
#plt.show()
| AvgRefLeaks | identifier_name |
indexed_set.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use array_vec::ArrayVec;
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::slice;
use bitslice::{BitSlice, Word};
use bitslice::{bitwise, Union, Subtract, Intersect};
use indexed_vec::Idx;
use rustc_serialize;
/// This is implemented by all the index sets so that IdxSet::union() can be
/// passed any type of index set.
pub trait UnionIntoIdxSet<T: Idx> {
// Performs `other = other | self`.
fn union_into(&self, other: &mut IdxSet<T>) -> bool;
}
/// This is implemented by all the index sets so that IdxSet::subtract() can be
/// passed any type of index set.
pub trait SubtractFromIdxSet<T: Idx> {
// Performs `other = other - self`.
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool;
}
/// Represents a set of some element type E, where each E is identified by some
/// unique index type `T`.
///
/// In other words, `T` is the type used to index into the bitvector
/// this type uses to represent the set of object it holds.
///
/// The representation is dense, using one bit per possible element.
#[derive(Eq, PartialEq)]
pub struct IdxSet<T: Idx> {
_pd: PhantomData<fn(&T)>,
bits: Vec<Word>,
}
impl<T: Idx> Clone for IdxSet<T> {
fn clone(&self) -> Self {
IdxSet { _pd: PhantomData, bits: self.bits.clone() }
}
}
impl<T: Idx> rustc_serialize::Encodable for IdxSet<T> {
fn encode<E: rustc_serialize::Encoder>(&self,
encoder: &mut E)
-> Result<(), E::Error> {
self.bits.encode(encoder)
}
}
impl<T: Idx> rustc_serialize::Decodable for IdxSet<T> {
fn decode<D: rustc_serialize::Decoder>(d: &mut D) -> Result<IdxSet<T>, D::Error> {
let words: Vec<Word> = rustc_serialize::Decodable::decode(d)?;
Ok(IdxSet {
_pd: PhantomData,
bits: words,
})
}
}
const BITS_PER_WORD: usize = mem::size_of::<Word>() * 8;
impl<T: Idx> fmt::Debug for IdxSet<T> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
w.debug_list()
.entries(self.iter())
.finish()
}
}
impl<T: Idx> IdxSet<T> {
fn new(init: Word, domain_size: usize) -> Self {
let num_words = (domain_size + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
IdxSet {
_pd: Default::default(),
bits: vec![init; num_words],
}
}
/// Creates set holding every element whose index falls in range 0..domain_size.
pub fn new_filled(domain_size: usize) -> Self {
let mut result = Self::new(!0, domain_size);
result.trim_to(domain_size);
result
}
/// Creates set holding no elements.
pub fn new_empty(domain_size: usize) -> Self {
Self::new(0, domain_size)
}
/// Duplicates as a hybrid set.
pub fn to_hybrid(&self) -> HybridIdxSet<T> {
// This domain_size may be slightly larger than the one specified
// upon creation, due to rounding up to a whole word. That's ok.
let domain_size = self.bits.len() * BITS_PER_WORD;
// Note: we currently don't bother trying to make a Sparse set.
HybridIdxSet::Dense(self.to_owned(), domain_size)
}
/// Removes all elements
pub fn clear(&mut self) {
for b in &mut self.bits {
*b = 0;
}
}
/// Sets all elements up to `domain_size`
pub fn set_up_to(&mut self, domain_size: usize) {
for b in &mut self.bits {
*b = !0;
}
self.trim_to(domain_size);
}
/// Clear all elements above `domain_size`.
fn trim_to(&mut self, domain_size: usize) {
// `trim_block` is the first block where some bits have
// to be cleared.
let trim_block = domain_size / BITS_PER_WORD;
// all the blocks above it have to be completely cleared.
if trim_block < self.bits.len() {
for b in &mut self.bits[trim_block+1..] {
*b = 0;
}
// at that block, the `domain_size % BITS_PER_WORD` LSBs
// should remain.
let remaining_bits = domain_size % BITS_PER_WORD;
let mask = (1<<remaining_bits)-1;
self.bits[trim_block] &= mask;
}
}
/// Removes `elem` from the set `self`; returns true iff this changed `self`.
pub fn remove(&mut self, elem: &T) -> bool {
self.bits.clear_bit(elem.index())
}
/// Adds `elem` to the set `self`; returns true iff this changed `self`.
pub fn add(&mut self, elem: &T) -> bool {
self.bits.set_bit(elem.index())
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
self.bits.get_bit(elem.index())
}
pub fn words(&self) -> &[Word] {
&self.bits
}
pub fn words_mut(&mut self) -> &mut [Word] {
&mut self.bits
}
/// Efficiently overwrite `self` with `other`. Panics if `self` and `other`
/// don't have the same length.
pub fn overwrite(&mut self, other: &IdxSet<T>) {
self.words_mut().clone_from_slice(other.words());
}
/// Set `self = self | other` and return true if `self` changed
/// (i.e., if new bits were added).
pub fn union(&mut self, other: &impl UnionIntoIdxSet<T>) -> bool {
other.union_into(self)
}
/// Set `self = self - other` and return true if `self` changed.
/// (i.e., if any bits were removed).
pub fn subtract(&mut self, other: &impl SubtractFromIdxSet<T>) -> bool {
other.subtract_from(self)
}
/// Set `self = self & other` and return true if `self` changed.
/// (i.e., if any bits were removed).
pub fn intersect(&mut self, other: &IdxSet<T>) -> bool {
bitwise(self.words_mut(), other.words(), &Intersect)
}
pub fn iter(&self) -> Iter<T> {
Iter {
cur: None,
iter: self.words().iter().enumerate(),
_pd: PhantomData,
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for IdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
bitwise(other.words_mut(), self.words(), &Union)
}
}
impl<T: Idx> SubtractFromIdxSet<T> for IdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
bitwise(other.words_mut(), self.words(), &Subtract)
}
}
pub struct Iter<'a, T: Idx> {
cur: Option<(Word, usize)>,
iter: iter::Enumerate<slice::Iter<'a, Word>>,
_pd: PhantomData<fn(&T)>,
}
impl<'a, T: Idx> Iterator for Iter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if let Some((ref mut word, offset)) = self.cur {
let bit_pos = word.trailing_zeros() as usize;
if bit_pos != BITS_PER_WORD {
let bit = 1 << bit_pos;
*word ^= bit;
return Some(T::new(bit_pos + offset))
}
}
let (i, word) = self.iter.next()?;
self.cur = Some((*word, BITS_PER_WORD * i));
}
}
}
const SPARSE_MAX: usize = 8;
/// A sparse index set with a maximum of SPARSE_MAX elements. Used by
/// HybridIdxSet; do not use directly.
///
/// The elements are stored as an unsorted vector with no duplicates.
#[derive(Clone, Debug)]
pub struct SparseIdxSet<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
impl<T: Idx> SparseIdxSet<T> {
fn new() -> Self {
SparseIdxSet(ArrayVec::new())
}
fn len(&self) -> usize {
self.0.len()
}
fn contains(&self, elem: &T) -> bool {
self.0.contains(elem)
}
fn add(&mut self, elem: &T) -> bool {
// Ensure there are no duplicates.
if self.0.contains(elem) {
false
} else {
self.0.push(*elem);
true
}
}
fn remove(&mut self, elem: &T) -> bool {
if let Some(i) = self.0.iter().position(|e| e == elem) {
// Swap the found element to the end, then pop it.
let len = self.0.len();
self.0.swap(i, len - 1);
self.0.pop();
true
} else {
false
}
}
fn to_dense(&self, domain_size: usize) -> IdxSet<T> {
let mut dense = IdxSet::new_empty(domain_size);
for elem in self.0.iter() {
dense.add(elem);
}
dense
}
fn iter(&self) -> SparseIter<T> {
SparseIter {
iter: self.0.iter(),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for SparseIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.add(&elem);
}
changed
}
}
impl<T: Idx> SubtractFromIdxSet<T> for SparseIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.remove(&elem);
}
changed
}
}
pub struct SparseIter<'a, T: Idx> {
iter: slice::Iter<'a, T>,
}
impl<'a, T: Idx> Iterator for SparseIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
self.iter.next().map(|e| *e)
}
}
/// Like IdxSet, but with a hybrid representation: sparse when there are few
/// elements in the set, but dense when there are many. It's especially
/// efficient for sets that typically have a small number of elements, but a
/// large `domain_size`, and are cleared frequently.
#[derive(Clone, Debug)]
pub enum HybridIdxSet<T: Idx> {
Sparse(SparseIdxSet<T>, usize),
Dense(IdxSet<T>, usize),
}
impl<T: Idx> HybridIdxSet<T> {
pub fn | (domain_size: usize) -> Self {
HybridIdxSet::Sparse(SparseIdxSet::new(), domain_size)
}
pub fn clear(&mut self) {
let domain_size = match *self {
HybridIdxSet::Sparse(_, size) => size,
HybridIdxSet::Dense(_, size) => size,
};
*self = HybridIdxSet::new_empty(domain_size);
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.contains(elem),
HybridIdxSet::Dense(dense, _) => dense.contains(elem),
}
}
/// Adds `elem` to the set `self`.
pub fn add(&mut self, elem: &T) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
// The set is sparse and has space for `elem`.
sparse.add(elem)
}
HybridIdxSet::Sparse(sparse, _) if sparse.contains(elem) => {
// The set is sparse and does not have space for `elem`, but
// that doesn't matter because `elem` is already present.
false
}
HybridIdxSet::Sparse(_, _) => {
// The set is sparse and full. Convert to a dense set.
//
// FIXME: This code is awful, but I can't work out how else to
// appease the borrow checker.
let dummy = HybridIdxSet::Sparse(SparseIdxSet::new(), 0);
match mem::replace(self, dummy) {
HybridIdxSet::Sparse(sparse, domain_size) => {
let mut dense = sparse.to_dense(domain_size);
let changed = dense.add(elem);
assert!(changed);
mem::replace(self, HybridIdxSet::Dense(dense, domain_size));
changed
}
_ => panic!("impossible"),
}
}
HybridIdxSet::Dense(dense, _) => dense.add(elem),
}
}
/// Removes `elem` from the set `self`.
pub fn remove(&mut self, elem: &T) -> bool {
// Note: we currently don't bother going from Dense back to Sparse.
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.remove(elem),
HybridIdxSet::Dense(dense, _) => dense.remove(elem),
}
}
/// Converts to a dense set, consuming itself in the process.
pub fn to_dense(self) -> IdxSet<T> {
match self {
HybridIdxSet::Sparse(sparse, domain_size) => sparse.to_dense(domain_size),
HybridIdxSet::Dense(dense, _) => dense,
}
}
/// Iteration order is unspecified.
pub fn iter(&self) -> HybridIter<T> {
match self {
HybridIdxSet::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
HybridIdxSet::Dense(dense, _) => HybridIter::Dense(dense.iter()),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for HybridIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.union_into(other),
HybridIdxSet::Dense(dense, _) => dense.union_into(other),
}
}
}
impl<T: Idx> SubtractFromIdxSet<T> for HybridIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.subtract_from(other),
HybridIdxSet::Dense(dense, _) => dense.subtract_from(other),
}
}
}
pub enum HybridIter<'a, T: Idx> {
Sparse(SparseIter<'a, T>),
Dense(Iter<'a, T>),
}
impl<'a, T: Idx> Iterator for HybridIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
match self {
HybridIter::Sparse(sparse) => sparse.next(),
HybridIter::Dense(dense) => dense.next(),
}
}
}
#[test]
fn test_trim_to() {
use std::cmp;
for i in 0..256 {
let mut idx_buf: IdxSet<usize> = IdxSet::new_filled(128);
idx_buf.trim_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..cmp::min(i, 128)).collect();
assert_eq!(elems, expected);
}
}
#[test]
fn test_set_up_to() {
for i in 0..128 {
for mut idx_buf in
vec![IdxSet::new_empty(128), IdxSet::new_filled(128)]
.into_iter()
{
idx_buf.set_up_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect();
assert_eq!(elems, expected);
}
}
}
#[test]
fn test_new_filled() {
for i in 0..128 {
let idx_buf = IdxSet::new_filled(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect();
assert_eq!(elems, expected);
}
}
| new_empty | identifier_name |
indexed_set.rs | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use array_vec::ArrayVec;
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::slice;
use bitslice::{BitSlice, Word};
use bitslice::{bitwise, Union, Subtract, Intersect};
use indexed_vec::Idx;
use rustc_serialize;
/// This is implemented by all the index sets so that IdxSet::union() can be
/// passed any type of index set.
pub trait UnionIntoIdxSet<T: Idx> {
// Performs `other = other | self`.
fn union_into(&self, other: &mut IdxSet<T>) -> bool;
}
/// This is implemented by all the index sets so that IdxSet::subtract() can be
/// passed any type of index set.
pub trait SubtractFromIdxSet<T: Idx> {
// Performs `other = other - self`.
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool;
}
/// Represents a set of some element type E, where each E is identified by some
/// unique index type `T`.
///
/// In other words, `T` is the type used to index into the bitvector
/// this type uses to represent the set of object it holds.
///
/// The representation is dense, using one bit per possible element.
#[derive(Eq, PartialEq)]
pub struct IdxSet<T: Idx> {
_pd: PhantomData<fn(&T)>,
bits: Vec<Word>,
}
impl<T: Idx> Clone for IdxSet<T> {
fn clone(&self) -> Self {
IdxSet { _pd: PhantomData, bits: self.bits.clone() }
}
}
impl<T: Idx> rustc_serialize::Encodable for IdxSet<T> {
fn encode<E: rustc_serialize::Encoder>(&self,
encoder: &mut E)
-> Result<(), E::Error> {
self.bits.encode(encoder)
}
}
impl<T: Idx> rustc_serialize::Decodable for IdxSet<T> {
fn decode<D: rustc_serialize::Decoder>(d: &mut D) -> Result<IdxSet<T>, D::Error> {
let words: Vec<Word> = rustc_serialize::Decodable::decode(d)?;
Ok(IdxSet {
_pd: PhantomData,
bits: words,
})
}
}
const BITS_PER_WORD: usize = mem::size_of::<Word>() * 8;
impl<T: Idx> fmt::Debug for IdxSet<T> {
fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result {
w.debug_list()
.entries(self.iter())
.finish()
}
}
impl<T: Idx> IdxSet<T> {
fn new(init: Word, domain_size: usize) -> Self {
let num_words = (domain_size + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
IdxSet {
_pd: Default::default(),
bits: vec![init; num_words],
}
}
/// Creates set holding every element whose index falls in range 0..domain_size.
pub fn new_filled(domain_size: usize) -> Self {
let mut result = Self::new(!0, domain_size);
result.trim_to(domain_size);
result
}
/// Creates set holding no elements.
pub fn new_empty(domain_size: usize) -> Self {
Self::new(0, domain_size)
}
/// Duplicates as a hybrid set.
pub fn to_hybrid(&self) -> HybridIdxSet<T> {
// This domain_size may be slightly larger than the one specified
// upon creation, due to rounding up to a whole word. That's ok.
let domain_size = self.bits.len() * BITS_PER_WORD;
// Note: we currently don't bother trying to make a Sparse set.
HybridIdxSet::Dense(self.to_owned(), domain_size)
}
/// Removes all elements
pub fn clear(&mut self) {
for b in &mut self.bits {
*b = 0;
}
}
/// Sets all elements up to `domain_size`
pub fn set_up_to(&mut self, domain_size: usize) {
for b in &mut self.bits {
*b = !0;
}
self.trim_to(domain_size);
}
/// Clear all elements above `domain_size`.
fn trim_to(&mut self, domain_size: usize) {
// `trim_block` is the first block where some bits have
// to be cleared.
let trim_block = domain_size / BITS_PER_WORD;
// all the blocks above it have to be completely cleared.
if trim_block < self.bits.len() {
for b in &mut self.bits[trim_block+1..] {
*b = 0;
}
// at that block, the `domain_size % BITS_PER_WORD` LSBs
// should remain.
let remaining_bits = domain_size % BITS_PER_WORD;
let mask = (1<<remaining_bits)-1;
self.bits[trim_block] &= mask;
}
}
/// Removes `elem` from the set `self`; returns true iff this changed `self`.
pub fn remove(&mut self, elem: &T) -> bool {
self.bits.clear_bit(elem.index())
}
/// Adds `elem` to the set `self`; returns true iff this changed `self`.
pub fn add(&mut self, elem: &T) -> bool {
self.bits.set_bit(elem.index())
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
self.bits.get_bit(elem.index())
}
pub fn words(&self) -> &[Word] {
&self.bits
}
pub fn words_mut(&mut self) -> &mut [Word] {
&mut self.bits
}
/// Efficiently overwrite `self` with `other`. Panics if `self` and `other`
/// don't have the same length.
pub fn overwrite(&mut self, other: &IdxSet<T>) {
self.words_mut().clone_from_slice(other.words());
}
/// Set `self = self | other` and return true if `self` changed
/// (i.e., if new bits were added).
pub fn union(&mut self, other: &impl UnionIntoIdxSet<T>) -> bool {
other.union_into(self)
}
/// Set `self = self - other` and return true if `self` changed.
/// (i.e., if any bits were removed).
pub fn subtract(&mut self, other: &impl SubtractFromIdxSet<T>) -> bool {
other.subtract_from(self)
}
/// Set `self = self & other` and return true if `self` changed.
/// (i.e., if any bits were removed).
pub fn intersect(&mut self, other: &IdxSet<T>) -> bool {
bitwise(self.words_mut(), other.words(), &Intersect)
}
pub fn iter(&self) -> Iter<T> {
Iter {
cur: None,
iter: self.words().iter().enumerate(),
_pd: PhantomData,
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for IdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
bitwise(other.words_mut(), self.words(), &Union)
}
}
impl<T: Idx> SubtractFromIdxSet<T> for IdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
bitwise(other.words_mut(), self.words(), &Subtract)
}
}
pub struct Iter<'a, T: Idx> {
cur: Option<(Word, usize)>,
iter: iter::Enumerate<slice::Iter<'a, Word>>,
_pd: PhantomData<fn(&T)>,
}
impl<'a, T: Idx> Iterator for Iter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
loop {
if let Some((ref mut word, offset)) = self.cur {
let bit_pos = word.trailing_zeros() as usize;
if bit_pos != BITS_PER_WORD {
let bit = 1 << bit_pos;
*word ^= bit;
return Some(T::new(bit_pos + offset))
}
}
let (i, word) = self.iter.next()?;
self.cur = Some((*word, BITS_PER_WORD * i));
}
}
}
const SPARSE_MAX: usize = 8;
/// A sparse index set with a maximum of SPARSE_MAX elements. Used by
/// HybridIdxSet; do not use directly.
///
/// The elements are stored as an unsorted vector with no duplicates.
#[derive(Clone, Debug)]
pub struct SparseIdxSet<T: Idx>(ArrayVec<[T; SPARSE_MAX]>);
impl<T: Idx> SparseIdxSet<T> {
fn new() -> Self {
SparseIdxSet(ArrayVec::new())
}
fn len(&self) -> usize {
self.0.len()
}
fn contains(&self, elem: &T) -> bool {
self.0.contains(elem)
}
fn add(&mut self, elem: &T) -> bool {
// Ensure there are no duplicates.
if self.0.contains(elem) {
false
} else {
self.0.push(*elem);
true
}
}
fn remove(&mut self, elem: &T) -> bool {
if let Some(i) = self.0.iter().position(|e| e == elem) {
// Swap the found element to the end, then pop it.
let len = self.0.len();
self.0.swap(i, len - 1);
self.0.pop();
true
} else {
false
}
}
fn to_dense(&self, domain_size: usize) -> IdxSet<T> {
let mut dense = IdxSet::new_empty(domain_size);
for elem in self.0.iter() {
dense.add(elem);
}
dense
}
fn iter(&self) -> SparseIter<T> {
SparseIter {
iter: self.0.iter(),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for SparseIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.add(&elem);
}
changed
}
}
impl<T: Idx> SubtractFromIdxSet<T> for SparseIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
let mut changed = false;
for elem in self.iter() {
changed |= other.remove(&elem);
}
changed
}
}
pub struct SparseIter<'a, T: Idx> {
iter: slice::Iter<'a, T>,
}
impl<'a, T: Idx> Iterator for SparseIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
self.iter.next().map(|e| *e)
}
}
/// Like IdxSet, but with a hybrid representation: sparse when there are few
/// elements in the set, but dense when there are many. It's especially
/// efficient for sets that typically have a small number of elements, but a
/// large `domain_size`, and are cleared frequently.
#[derive(Clone, Debug)]
pub enum HybridIdxSet<T: Idx> {
Sparse(SparseIdxSet<T>, usize),
Dense(IdxSet<T>, usize),
}
impl<T: Idx> HybridIdxSet<T> {
pub fn new_empty(domain_size: usize) -> Self {
HybridIdxSet::Sparse(SparseIdxSet::new(), domain_size)
}
pub fn clear(&mut self) {
let domain_size = match *self {
HybridIdxSet::Sparse(_, size) => size,
HybridIdxSet::Dense(_, size) => size,
};
*self = HybridIdxSet::new_empty(domain_size);
}
/// Returns true iff set `self` contains `elem`.
pub fn contains(&self, elem: &T) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.contains(elem),
HybridIdxSet::Dense(dense, _) => dense.contains(elem),
}
}
/// Adds `elem` to the set `self`.
pub fn add(&mut self, elem: &T) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => {
// The set is sparse and has space for `elem`.
sparse.add(elem)
}
HybridIdxSet::Sparse(sparse, _) if sparse.contains(elem) => {
// The set is sparse and does not have space for `elem`, but
// that doesn't matter because `elem` is already present.
false
}
HybridIdxSet::Sparse(_, _) => {
// The set is sparse and full. Convert to a dense set.
//
// FIXME: This code is awful, but I can't work out how else to
// appease the borrow checker.
let dummy = HybridIdxSet::Sparse(SparseIdxSet::new(), 0);
match mem::replace(self, dummy) {
HybridIdxSet::Sparse(sparse, domain_size) => {
let mut dense = sparse.to_dense(domain_size);
let changed = dense.add(elem);
assert!(changed);
mem::replace(self, HybridIdxSet::Dense(dense, domain_size));
changed
}
_ => panic!("impossible"),
}
}
HybridIdxSet::Dense(dense, _) => dense.add(elem),
}
}
/// Removes `elem` from the set `self`.
pub fn remove(&mut self, elem: &T) -> bool {
// Note: we currently don't bother going from Dense back to Sparse.
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.remove(elem),
HybridIdxSet::Dense(dense, _) => dense.remove(elem),
}
}
/// Converts to a dense set, consuming itself in the process.
pub fn to_dense(self) -> IdxSet<T> {
match self {
HybridIdxSet::Sparse(sparse, domain_size) => sparse.to_dense(domain_size),
HybridIdxSet::Dense(dense, _) => dense,
}
}
/// Iteration order is unspecified.
pub fn iter(&self) -> HybridIter<T> {
match self {
HybridIdxSet::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()),
HybridIdxSet::Dense(dense, _) => HybridIter::Dense(dense.iter()),
}
}
}
impl<T: Idx> UnionIntoIdxSet<T> for HybridIdxSet<T> {
fn union_into(&self, other: &mut IdxSet<T>) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.union_into(other),
HybridIdxSet::Dense(dense, _) => dense.union_into(other),
}
}
}
impl<T: Idx> SubtractFromIdxSet<T> for HybridIdxSet<T> {
fn subtract_from(&self, other: &mut IdxSet<T>) -> bool {
match self {
HybridIdxSet::Sparse(sparse, _) => sparse.subtract_from(other),
HybridIdxSet::Dense(dense, _) => dense.subtract_from(other),
}
}
}
pub enum HybridIter<'a, T: Idx> {
Sparse(SparseIter<'a, T>),
Dense(Iter<'a, T>),
}
impl<'a, T: Idx> Iterator for HybridIter<'a, T> {
type Item = T;
fn next(&mut self) -> Option<T> {
match self {
HybridIter::Sparse(sparse) => sparse.next(),
HybridIter::Dense(dense) => dense.next(),
}
}
}
#[test]
fn test_trim_to() {
use std::cmp;
for i in 0..256 {
let mut idx_buf: IdxSet<usize> = IdxSet::new_filled(128);
idx_buf.trim_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..cmp::min(i, 128)).collect();
assert_eq!(elems, expected);
}
}
#[test]
fn test_set_up_to() {
for i in 0..128 {
for mut idx_buf in
vec![IdxSet::new_empty(128), IdxSet::new_filled(128)]
.into_iter()
{
idx_buf.set_up_to(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect(); |
#[test]
fn test_new_filled() {
for i in 0..128 {
let idx_buf = IdxSet::new_filled(i);
let elems: Vec<usize> = idx_buf.iter().collect();
let expected: Vec<usize> = (0..i).collect();
assert_eq!(elems, expected);
}
} | assert_eq!(elems, expected);
}
}
} | random_line_split |
source_contributions.rs | extern crate clap;
extern crate csv;
extern crate reqwest;
extern crate serde;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use csv::StringRecord;
use reqwest::{Client, Url};
use serde::de::DeserializeOwned;
use serde::Deserialize;
use std::{thread, time};
use std::collections::HashSet;
use std::env;
use std::fs::{File, OpenOptions};
use std::io::Write;
enum HttpMethod {
Get,
}
#[derive(Debug, Fail)]
enum AppError {
// Returned when we couldn't extract an owner and a repo from the repository URL.
#[fail(display = "Couldn't extract project metadata for {}", repo_url)]
MetadataExtractionFailed { repo_url: String },
// Returned in case of generic I/O error.
#[fail(display = "i/o error when reading/writing on the CSV file {}", _0)]
IOError(std::io::Error),
// Returned when the OSRANK_GITHUB_TOKEN is not present as an env var.
#[fail(display = "Couldn't find OSRANK_GITHUB_TOKEN in your env vars: {}", _0)]
GithubTokenNotFound(std::env::VarError),
// Returned when we failed to issue the HTTP request.
#[fail(display = "Request to Github failed: {}", _0)]
GithubAPIRequestFailed(reqwest::Error),
// Returned when the Github API returned a non-2xx status code.
#[fail(display = "Github returned non-200 {} with body {}", _0, _1)]
GithubAPINotOK(reqwest::StatusCode, String),
// Returned when the parsing of the http URL to query Github failed.
#[fail(display = "Github URL failed parsing into a valid HTTP URL: {}", _0)]
GithubUrlParsingFailed(reqwest::UrlError),
    // Returned when we couldn't deserialise the JSON returned by Github.
#[fail(display = "Couldn't deserialise the JSON returned by Github: {}", _0)]
DeserialisationFailure(reqwest::Error),
    // Returned when we ran out of retries while waiting for Github to compute the stats.
#[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must be the same as in the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Because some
/// endpoints (like the statistics one) serve cached information and might
/// return a 202 with an empty JSON body while the stats are still being
/// computed, we need to wait a little bit and retry, up to a certain number
/// of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
{
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats.
// We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left ...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
}
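// A hypothetical usage sketch for `call_github` (kept as a comment, since it
// needs a real token and network access); the owner/repo pair below is made
// up. Fetching the contributor stats for a repository looks roughly like:
//
//     let client = reqwest::Client::new();
//     let stats: Vec<GithubContribution> = call_github(
//         &client,
//         HttpMethod::Get,
//         github_token,
//         "/repos/some-owner/some-repo/stats/contributors",
//         Retries::new(5),
//     )?;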
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
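// Note (added): the column indices used above appear to follow the layout of the
// libraries.io "projects_with_repository_fields" CSV export referenced in main():
// 0 = ID, 1 = Platform, 2 = Name, 9 = Repository URL, 24 = Repository Fork?,
// 54 = Repository Display Name. This mapping is inferred from the code and the
// test record at the bottom of this file, not from an external schema.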
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
// Write the header (if we are not resuming)
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// As we cannot know which is the /next/ element we need to process
// and we are resuming from the last (known) one, we need to skip it
// in order to not create a dupe.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
fn | (repo_url: &str) -> Result<(&str, &str), AppError> {
match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
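// Worked example (added): "https://github.com/RNCryptor/rncryptor-rs".split('/')
// yields ["https:", "", "github.com", "RNCryptor", "rncryptor-rs"], which matches
// the [_, "", "github.com", owner, repo] pattern above and returns
// Ok(("RNCryptor", "rncryptor-rs")); any other shape is a MetadataExtractionFailed.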
// Extract the contributions for this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this project hasn't been processed yet and is not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms to not overload Github and not hit the quota limit.
// GH allows us 5000 requests per hour. If we wait 800ms, we
// aim for the theoretical limit, while preserving a certain
// slack.
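// (Added note: 3600 s / 5000 requests ≈ 720 ms between requests, so an 800 ms
// pause stays roughly 10% below the hourly quota's maximum rate.)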
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider as a maintainer of a project
// a user that has contributed for more than 6 months. Furthermore, they need
// to have a somewhat steady contribution history.
fn is_maintainer(owner: &str, stat: &GithubContribution, stats_len: usize) -> bool {
// A user is considered a maintainer if one of the following occurs:
// 1. The owner of the repo is equal to their username;
// 2. They have more than 50 contributions;
// 3. They are the only contributor to the repo.
stat.author.login == owner || { stat.total > 50 } || stats_len as u32 == 1
}
fn by_platform<'a>(platform: &'a str) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| e[1] == *platform)
}
// Returns a predicate that always yields false if the user didn't ask to resume
// the process from a particular project URL. If the user supplied a project, the
// predicate skips StringRecord entries until it matches the input URL.
fn resumes<'a>(resume_from: Option<&'a str>) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| match resume_from {
None => false,
Some(repo_url) => Some(repo_url) != e.get(9),
})
}
const GITHUB_BASE_URL: &str = "https://api.github.com";
fn main() -> Result<(), AppError> {
let github_token = env::var("OSRANK_GITHUB_TOKEN")?;
let input_help = r###"Where to read the data from.
Example: ~/Downloads/libraries-1.4.0-2018-12-22/projects_with_repository_fields-1.4.0-2018-12-22.csv"###;
let matches = App::new("Source contributions from Github")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.help(input_help)
.index(1)
.required(true),
)
.arg(
Arg::with_name("platform")
.short("p")
.long("platform")
.help("Example: Rust,NPM,Rubygems,..")
.index(2)
.required(true),
)
.arg(
Arg::with_name("resume-from")
.long("resume-from")
.help("which repository URL to resume from.")
.takes_value(true)
.required(false),
)
.get_matches();
source_contributors(
&github_token,
matches
.value_of("input")
.expect("input parameter wasn't given."),
matches
.value_of("platform")
.expect("platform parameter wasn't given."),
matches.value_of("resume-from"),
)
}
#[test]
fn test_rncryptor_deserialise() {
let input:String = String::from(r###"
2084361,Cargo,rncryptor,2016-12-23 09:57:46 UTC,2018-01-03 08:59:05 UTC,Rust implementation of the RNCryptor AES file format,"",http://rncryptor.github.io/,MIT,https://github.com/RNCryptor/rncryptor-rs,1,0,2016-12-23 09:57:29 UTC,0.1.0,,0,Rust,,2018-01-03 08:59:02 UTC,0,17362897,GitHub,RNCryptor/rncryptor-rs,Pure Rust implementation of the RNCryptor cryptographic format by Rob Napier,false,2016-12-18 17:37:39 UTC,2016-12-30 02:04:24 UTC,2016-12-26 17:33:32 UTC,,58,4,Rust,true,true,false,0,,1,master,0,76797122,,MIT,0,"","","","","","","",,2016-12-18 17:38:00 UTC,2,GitHub,,git,,,""
"###);
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(input.as_bytes());
for result in rdr.records() {
let r = result.expect("impossible");
assert_eq!(deserialise_project(&r).is_some(), true)
}
}
#[test]
fn skip_while_ok() {
let a = [1, -1i32, 0, 1];
let mut iter = a.into_iter().skip_while(|x| x.is_negative());
assert_eq!(iter.next(), Some(&1));
}
| extract_github_owner_and_repo | identifier_name |
source_contributions.rs | extern crate clap;
extern crate csv;
extern crate reqwest;
extern crate serde;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use csv::StringRecord;
use reqwest::{Client, Url};
use serde::de::DeserializeOwned;
use serde::Deserialize;
use std::{thread, time};
use std::collections::HashSet;
use std::env;
use std::fs::{File, OpenOptions};
use std::io::Write;
enum HttpMethod {
Get,
}
#[derive(Debug, Fail)]
enum AppError {
// Returned when we couldn't extract an owner and a repo from the repository URL.
#[fail(display = "Couldn't extract project metadata for {}", repo_url)]
MetadataExtractionFailed { repo_url: String },
// Returned in case of generic I/O error.
#[fail(display = "i/o error when reading/writing on the CSV file {}", _0)]
IOError(std::io::Error),
// Returned when the OSRANK_GITHUB_TOKEN is not present as an env var.
#[fail(display = "Couldn't find OSRANK_GITHUB_TOKEN in your env vars: {}", _0)]
GithubTokenNotFound(std::env::VarError),
// Returned when we failed to issue the HTTP request.
#[fail(display = "Request to Github failed: {}", _0)]
GithubAPIRequestFailed(reqwest::Error),
// Returned when the Github API returned a non-2xx status code.
#[fail(display = "Github returned non-200 {} with body {}", _0, _1)]
GithubAPINotOK(reqwest::StatusCode, String),
// Returned when the parsing of the http URL to query Github failed.
#[fail(display = "Github URL failed parsing into a valid HTTP URL: {}", _0)]
GithubUrlParsingFailed(reqwest::UrlError),
// Returned when deserialising the JSON returned by Github failed.
#[fail(display = "Couldn't deserialise the JSON returned by Github: {}", _0)]
DeserialisationFailure(reqwest::Error),
// Returned when we ran out of retries against the Github API.
#[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must be the same of the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Some
/// endpoints, like the statistics one, serve cached information and might
/// return a 202 with an empty JSON body while the stats are being computed,
/// so we need to wait a little bit and retry, up to a certain number of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
|
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
// Write the header (if we are not resuming)
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// As we cannot know which is the /next/ element we need to process
// and we are resuming from the last (known) one, we need to skip it
// in order to not create a dupe.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
fn extract_github_owner_and_repo(repo_url: &str) -> Result<(&str, &str), AppError> {
match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
// Extract the contributions for this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this project hasn't been processed yet and is not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms to not overload Github and not hit the quota limit.
// GH allows us 5000 requests per hour. If we wait 800ms, we
// aim for the theoretical limit, while preserving a certain
// slack.
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider as a maintainer of a project
// a user that has contributed for more than 6 months. Furthermore, they need
// to have a somewhat steady contribution history.
fn is_maintainer(owner: &str, stat: &GithubContribution, stats_len: usize) -> bool {
// A user is considered a maintainer if one of the following occurs:
// 1. The owner of the repo is equal to their username;
// 2. They have more than 50 contributions;
// 3. They are the only contributor to the repo.
stat.author.login == owner || { stat.total > 50 } || stats_len as u32 == 1
}
fn by_platform<'a>(platform: &'a str) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| e[1] == *platform)
}
// Returns a predicate that always yields false if the user didn't ask to resume
// the process from a particular project URL. If the user supplied a project, the
// predicate skips StringRecord entries until it matches the input URL.
fn resumes<'a>(resume_from: Option<&'a str>) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| match resume_from {
None => false,
Some(repo_url) => Some(repo_url) != e.get(9),
})
}
const GITHUB_BASE_URL: &str = "https://api.github.com";
fn main() -> Result<(), AppError> {
let github_token = env::var("OSRANK_GITHUB_TOKEN")?;
let input_help = r###"Where to read the data from.
Example: ~/Downloads/libraries-1.4.0-2018-12-22/projects_with_repository_fields-1.4.0-2018-12-22.csv"###;
let matches = App::new("Source contributions from Github")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.help(input_help)
.index(1)
.required(true),
)
.arg(
Arg::with_name("platform")
.short("p")
.long("platform")
.help("Example: Rust,NPM,Rubygems,..")
.index(2)
.required(true),
)
.arg(
Arg::with_name("resume-from")
.long("resume-from")
.help("which repository URL to resume from.")
.takes_value(true)
.required(false),
)
.get_matches();
source_contributors(
&github_token,
matches
.value_of("input")
.expect("input parameter wasn't given."),
matches
.value_of("platform")
.expect("platform parameter wasn't given."),
matches.value_of("resume-from"),
)
}
#[test]
fn test_rncryptor_deserialise() {
let input:String = String::from(r###"
2084361,Cargo,rncryptor,2016-12-23 09:57:46 UTC,2018-01-03 08:59:05 UTC,Rust implementation of the RNCryptor AES file format,"",http://rncryptor.github.io/,MIT,https://github.com/RNCryptor/rncryptor-rs,1,0,2016-12-23 09:57:29 UTC,0.1.0,,0,Rust,,2018-01-03 08:59:02 UTC,0,17362897,GitHub,RNCryptor/rncryptor-rs,Pure Rust implementation of the RNCryptor cryptographic format by Rob Napier,false,2016-12-18 17:37:39 UTC,2016-12-30 02:04:24 UTC,2016-12-26 17:33:32 UTC,,58,4,Rust,true,true,false,0,,1,master,0,76797122,,MIT,0,"","","","","","","",,2016-12-18 17:38:00 UTC,2,GitHub,,git,,,""
"###);
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(input.as_bytes());
for result in rdr.records() {
let r = result.expect("impossible");
assert_eq!(deserialise_project(&r).is_some(), true)
}
}
#[test]
fn skip_while_ok() {
let a = [1, -1i32, 0, 1];
let mut iter = a.into_iter().skip_while(|x| x.is_negative());
assert_eq!(iter.next(), Some(&1));
}
| {
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats.
// We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left ...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
} | identifier_body |
source_contributions.rs | extern crate clap;
extern crate csv;
extern crate reqwest;
extern crate serde;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use csv::StringRecord;
use reqwest::{Client, Url};
use serde::de::DeserializeOwned;
use serde::Deserialize;
use std::{thread, time};
use std::collections::HashSet;
use std::env;
use std::fs::{File, OpenOptions};
use std::io::Write;
enum HttpMethod {
Get,
}
#[derive(Debug, Fail)]
enum AppError {
// Returned when we couldn't extract an owner and a repo from the repository URL.
#[fail(display = "Couldn't extract project metadata for {}", repo_url)]
MetadataExtractionFailed { repo_url: String },
// Returned in case of generic I/O error.
#[fail(display = "i/o error when reading/writing on the CSV file {}", _0)]
IOError(std::io::Error),
// Returned when the OSRANK_GITHUB_TOKEN is not present as an env var.
#[fail(display = "Couldn't find OSRANK_GITHUB_TOKEN in your env vars: {}", _0)]
GithubTokenNotFound(std::env::VarError),
// Returned when we failed to issue the HTTP request.
#[fail(display = "Request to Github failed: {}", _0)]
GithubAPIRequestFailed(reqwest::Error),
// Returned when the Github API returned a non-2xx status code.
#[fail(display = "Github returned non-200 {} with body {}", _0, _1)]
GithubAPINotOK(reqwest::StatusCode, String),
// Returned when the parsing of the http URL to query Github failed.
#[fail(display = "Github URL failed parsing into a valid HTTP URL: {}", _0)]
GithubUrlParsingFailed(reqwest::UrlError),
// Returned when deserialising the JSON returned by Github failed.
#[fail(display = "Couldn't deserialise the JSON returned by Github: {}", _0)]
DeserialisationFailure(reqwest::Error),
// Returned when we ran out of retries against the Github API.
#[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must be the same of the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Some
/// endpoints, like the statistics one, serve cached information and might
/// return a 202 with an empty JSON body while the stats are being computed,
/// so we need to wait a little bit and retry, up to a certain number of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
{
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats.
// We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left ...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
}
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
// Write the header (if we are not resuming)
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// As we cannot know which is the /next/ element we need to process
// and we are resuming from the last (known) one, we need to skip it
// in order to not create a dupe.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
| match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
// Extract the contributions for this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this project hasn't been processed yet and is not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms to not overload Github and not hit the quota limit.
// GH allows us 5000 requests per hour. If we wait 800ms, we
// aim for the theoretical limit, while preserving a certain
// slack.
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider as a maintainer of a project
// a user that has contributed for more than 6 months. Furthermore, they need
// to have a somewhat steady contribution history.
fn is_maintainer(owner: &str, stat: &GithubContribution, stats_len: usize) -> bool {
// A user is considered a maintainer if one of the following occurs:
// 1. The owner of the repo is equal to their username;
// 2. They have more than 50 contributions;
// 3. They are the only contributor to the repo.
stat.author.login == owner || { stat.total > 50 } || stats_len as u32 == 1
}
fn by_platform<'a>(platform: &'a str) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| e[1] == *platform)
}
// Returns a predicate that always yields false if the user didn't ask to resume
// the process from a particular project URL. If the user supplied a project, the
// predicate skips StringRecord entries until it matches the input URL.
fn resumes<'a>(resume_from: Option<&'a str>) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| match resume_from {
None => false,
Some(repo_url) => Some(repo_url) != e.get(9),
})
}
const GITHUB_BASE_URL: &str = "https://api.github.com";
fn main() -> Result<(), AppError> {
let github_token = env::var("OSRANK_GITHUB_TOKEN")?;
let input_help = r###"Where to read the data from.
Example: ~/Downloads/libraries-1.4.0-2018-12-22/projects_with_repository_fields-1.4.0-2018-12-22.csv"###;
let matches = App::new("Source contributions from Github")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.help(input_help)
.index(1)
.required(true),
)
.arg(
Arg::with_name("platform")
.short("p")
.long("platform")
.help("Example: Rust,NPM,Rubygems,..")
.index(2)
.required(true),
)
.arg(
Arg::with_name("resume-from")
.long("resume-from")
.help("which repository URL to resume from.")
.takes_value(true)
.required(false),
)
.get_matches();
source_contributors(
&github_token,
matches
.value_of("input")
.expect("input parameter wasn't given."),
matches
.value_of("platform")
.expect("platform parameter wasn't given."),
matches.value_of("resume-from"),
)
}
#[test]
fn test_rncryptor_deserialise() {
let input:String = String::from(r###"
2084361,Cargo,rncryptor,2016-12-23 09:57:46 UTC,2018-01-03 08:59:05 UTC,Rust implementation of the RNCryptor AES file format,"",http://rncryptor.github.io/,MIT,https://github.com/RNCryptor/rncryptor-rs,1,0,2016-12-23 09:57:29 UTC,0.1.0,,0,Rust,,2018-01-03 08:59:02 UTC,0,17362897,GitHub,RNCryptor/rncryptor-rs,Pure Rust implementation of the RNCryptor cryptographic format by Rob Napier,false,2016-12-18 17:37:39 UTC,2016-12-30 02:04:24 UTC,2016-12-26 17:33:32 UTC,,58,4,Rust,true,true,false,0,,1,master,0,76797122,,MIT,0,"","","","","","","",,2016-12-18 17:38:00 UTC,2,GitHub,,git,,,""
"###);
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(input.as_bytes());
for result in rdr.records() {
let r = result.expect("impossible");
assert_eq!(deserialise_project(&r).is_some(), true)
}
}
#[test]
fn skip_while_ok() {
let a = [1, -1i32, 0, 1];
let mut iter = a.into_iter().skip_while(|x| x.is_negative());
assert_eq!(iter.next(), Some(&1));
} | fn extract_github_owner_and_repo(repo_url: &str) -> Result<(&str, &str), AppError> { | random_line_split |
shanten_improve.go | package util
import (
"fmt"
"sort"
"math"
"github.com/EndlessCheng/mahjong-helper/util/model"
)
// map[improvement tile]acceptance (the one with the largest acceptance is chosen)
type Improves map[int]Waits
// Analysis result for a 1/4/7/10/13-tile hand
type WaitsWithImproves13 struct {
// Original hand
Tiles34 []int
// Remaining (unseen) tiles
LeftTiles34 []int
// Whether any call (naki) has been made
IsNaki bool
// Shanten number
Shanten int
// Acceptance (tiles that advance the shanten)
// Takes the remaining tile counts into account
// If all 4 copies of an acceptance tile are visible, its value is 0
Waits Waits
// TODO: meld acceptance: a tile discarded by another player that can be called and advances the shanten
//MeldWaits Waits
// map[acceptance tile]acceptance count after the shanten advances (the discard chosen here is the one that maximises the post-advance acceptance)
NextShantenWaitsCountMap map[int]int
// Weighted average of the acceptance count after the shanten advances
AvgNextShantenWaitsCount float64
// Score combining the current acceptance with the post-advance acceptance
MixedWaitsScore float64
// Improvement: drawing this tile does not advance the shanten but increases the acceptance
// len(Improves) is the number of distinct improvement tiles
Improves Improves
// Number of improvement cases, i.e. how many discard choices increase the acceptance
ImproveWayCount int
// Weighted average of the improved acceptance when no acceptance tile is drawn (tiles that are neither acceptance nor improvement count as Waits.AllCount())
// Only one turn of improvement is averaged here
// TODO: with improvements taken into account, how to compute the expected number of draws needed to advance the shanten?
AvgImproveWaitsCount float64
// Weighted average of the best win rate of the tenpai reached after the shanten advances
// If already tenpai, this is the win rate of the current hand
AvgAgariRate float64
// Probability of being furiten (at 1-shanten and tenpai)
FuritenRate float64
// Yaku types
YakuTypes []int
// Number of dora (hand + melds)
DoraCount int
// Expected ron score without riichi
RonPoint float64
// Expected ron score with riichi
RiichiRonPoint float64
// Expected tsumo score
TsumoPoint float64
// TODO: red five improvement reminder
}
// Score combining the acceptance and the post-advance acceptance
// Roughly approximated here as the probability of advancing the shanten twice
func (r *WaitsWithImproves13) mixedWaitsScore() float64 {
if r.Waits.AllCount() == 0 || r.AvgNextShantenWaitsCount == 0 {
return 0
}
leftCount := float64(CountOfTiles34(r.LeftTiles34))
p2 := float64(r.Waits.AllCount()) / leftCount
//p2 := r.AvgImproveWaitsCount / leftCount
p1 := r.AvgNextShantenWaitsCount / leftCount
//if r.AvgAgariRate > 0 {
// p1 = r.AvgAgariRate / 100
//}
p2_, p1_ := 1-p2, 1-p1
const leftTurns = 10.0 // math.Max(5.0, leftCount/4)
sumP2 := p2_ * (1 - math.Pow(p2_, leftTurns)) / p2
sumP1 := p1_ * (1 - math.Pow(p1_, leftTurns)) / p1
result := p2 * p1 * (sumP2 - sumP1) / (p2_ - p1_)
return result * 100
}
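// Editorial note (added; a rough reading of the formula above, not the author's wording):
// per draw, p2 is the chance of pulling a tile that advances the shanten now and p1 the
// chance of the follow-up advance. sumP2 and sumP1 are the geometric sums
// sum_{k=1..T}(1-p2)^k and sum_{k=1..T}(1-p1)^k with T = leftTurns, and
// p2*p1*(sumP2-sumP1)/((1-p2)-(1-p1)) is then (roughly) the closed form of
// P(X+Y <= T+1) for independent waiting times X ~ Geom(p2), Y ~ Geom(p1),
// i.e. the chance of advancing the shanten twice within about T draws, scaled to 0-100.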
// For debugging
func (r *WaitsWithImproves13) String() string {
s := fmt.Sprintf("%d 进张 %s\n%.2f 改良进张 [%d(%d) 种]",
r.Waits.AllCount(),
//r.Waits.AllCount()+r.MeldWaits.AllCount(),
TilesToStrWithBracket(r.Waits.indexes()),
r.AvgImproveWaitsCount,
len(r.Improves),
r.ImproveWayCount,
)
if r.Shanten >= 1 {
mixedScore := r.MixedWaitsScore
//for i := 2; i <= r.Shanten; i++ {
// mixedScore /= 4
//}
s += fmt.Sprintf(" %.2f %s进张(%.2f 综合分)",
r.AvgNextShantenWaitsCount,
NumberToChineseShanten(r.Shanten-1),
mixedScore,
)
}
if r.Shanten >= 0 && r.Shanten <= 1 {
s += fmt.Sprintf("(%.2f%% 参考和率)", r.AvgAgariRate)
if r.FuritenRate > 0 {
if r.FuritenRate < 1 {
s += "[可能振听]"
} else {
s += "[振听]"
}
}
s += YakuTypesWithDoraToStr(r.YakuTypes, r.DoraCount)
}
if r.RonPoint > 0 {
s += fmt.Sprintf("[(默听)荣和%d]", int(math.Round(r.RonPoint)))
}
if r.RiichiRonPoint > 0 {
s += fmt.Sprintf("[立直荣和%d]", int(math.Round(r.RiichiRonPoint)))
}
if r.TsumoPoint > 0 {
s += fmt.Sprintf("[自摸%d]", int(math.Round(r.TsumoPoint)))
}
return s
}
// For a 1/4/7/10/13-tile hand, compute the shanten number and the acceptance (taking remaining tile counts into account)
func CalculateShantenAndWaits13(tiles34 []int, leftTiles34 []int) (shanten int, waits Waits) {
if len(leftTiles34) == 0 {
leftTiles34 = InitLeftTiles34WithTiles34(tiles34)
}
shanten = CalculateShanten(tiles34)
// Pruning: detect non-floating tiles; ignoring kokushi musou, such tiles can never advance the shanten (they may still improve the hand, but CalculateShantenAndWaits13 does not consider that)
// This optimisation improved performance by about 30%
//needCheck34 := make([]bool, 34)
//idx := -1
//for i := 0; i < 3; i++ {
// for j := 0; j < 9; j++ {
// idx++
// if tiles34[idx] == 0 {
// continue
// }
// if j == 0 {
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 1 {
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j < 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// } else {
// needCheck34[i | heck34[idx] = true
// }
// }
//}
//for i := 27; i < 34; i++ {
// if tiles34[i] > 0 {
// needCheck34[i] = true
// }
//}
waits = Waits{}
for i := 0; i < 34; i++ {
//if !needCheck34[i] {
// continue
//}
if tiles34[i] == 4 {
// 无法摸到这张牌
continue
}
// 摸牌
tiles34[i]++
if newShanten := CalculateShanten(tiles34); newShanten < shanten {
// 向听前进了,则换的这张牌为进张,进张数即剩余枚数
// 有可能为 0,但这对于判断振听是有帮助的,所以记录
waits[i] = leftTiles34[i]
}
tiles34[i]--
}
return
}
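// A minimal usage sketch (added; it mirrors the call made in CalculateShantenWithImproves13 below):
//   shanten13, waits := CalculateShantenAndWaits13(playerInfo.HandTiles34, playerInfo.LeftTiles34)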
// For a 1/4/7/10/13-tile hand, compute the shanten number, acceptance, improvements, etc. (taking remaining tile counts into account)
func CalculateShantenWithImproves13(playerInfo *model.PlayerInfo) (r *WaitsWithImproves13) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
leftTiles34 := playerInfo.LeftTiles34
shanten13, waits := CalculateShantenAndWaits13(tiles34, leftTiles34)
waitsCount := waits.AllCount()
nextShantenWaitsCountMap := map[int]int{} // map[进张牌]听多少张牌
improves := Improves{}
improveWayCount := 0
// 对于每张牌,摸到之后的手牌进张数(如果摸到的是 waits 中的牌,则进张数视作 waitsCount)
maxImproveWaitsCount34 := make([]int, 34)
for i := 0; i < 34; i++ {
maxImproveWaitsCount34[i] = waitsCount // 初始化成基本进张
}
avgAgariRate := 0.0
avgRonPoint := 0.0
ronPointWeight := 0
avgRiichiRonPoint := 0.0
canYaku := make([]bool, maxYakuType)
if len(playerInfo.Melds) == 0 && CountPairsOfTiles34(tiles34)+shanten13 == 6 {
// At 3-shanten, chiitoitsu is only considered when the acceptance is poor
if shanten13 == 3 {
if waitsCount <= 21 {
canYaku[YakuChiitoi] = true
}
} else if shanten13 == 1 || shanten13 == 2 {
// At 1-shanten and 2-shanten, chiitoitsu is considered
canYaku[YakuChiitoi] = true
}
}
fillYakuTypes := func(_shanten13 int, _waits Waits) {
if _shanten13 != 0 {
return
}
// 听牌
for tile, left := range _waits {
if left == 0 {
continue
}
tiles34[tile]++
playerInfo.WinTile = tile
_yakuTypes := FindAllYakuTypes(playerInfo)
for _, t := range _yakuTypes {
canYaku[t] = true
}
tiles34[tile]--
}
}
// 计算可能的役种
fillYakuTypes(shanten13, waits)
for i := 0; i < 34; i++ {
// 从剩余牌中摸牌
if leftTiles34[i] == 0 {
continue
}
leftTiles34[i]--
tiles34[i]++
if _, ok := waits[i]; ok { // 摸到的是进张
maxAgariRate := 0.0 // 摸到此进张后的和率
maxAvgRonPoint := 0.0 // 平均打点
maxAvgRiichiRonPoint := 0.0
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 向听前进才是正确的切牌
if newShanten13, newWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 < shanten13 {
// 切牌一般切进张最多的
if waitsCount := newWaits.AllCount(); waitsCount > nextShantenWaitsCountMap[i] {
nextShantenWaitsCountMap[i] = waitsCount
}
// 听牌了
if newShanten13 == 0 {
// 听牌一般切和率最高的,TODO: 除非打点更高,比如说听到 dora 上,或者有三色等
_agariRate := CalculateAvgAgariRate(newWaits, playerInfo.DiscardTiles)
if _agariRate >= maxAgariRate {
maxAgariRate = _agariRate
// 计算荣和点数
// TODO: 这里简化了,和率优先,需要更加精细的考量
// TODO: maxAvgRonPoint = CalcAvgRonPoint(playerInfo, newWaits)
// TODO: maxAvgRiichiRonPoint = CalcAvgRiichiRonPoint(playerInfo, newWaits)
}
// 计算可能的役种
fillYakuTypes(newShanten13, newWaits)
}
}
playerInfo.UndoDiscardTile(j, _isRedFive)
}
// 加权:进张牌的剩余枚数*和率
w := leftTiles34[i] + 1
avgAgariRate += maxAgariRate * float64(w)
if maxAvgRonPoint > 0 {
avgRonPoint += maxAvgRonPoint * float64(w)
ronPointWeight += w
}
//fmt.Println(i, maxAvgRiichiRonPoint)
avgRiichiRonPoint += maxAvgRiichiRonPoint * float64(w)
} else { // 摸到的不是进张,但可能有改良
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 正确的切牌
if newShanten13, improveWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 == shanten13 {
// 若进张数变多,则为改良
if improveWaitsCount := improveWaits.AllCount(); improveWaitsCount > waitsCount {
improveWayCount++
if improveWaitsCount > maxImproveWaitsCount34[i] {
maxImproveWaitsCount34[i] = improveWaitsCount
// improves 选的是进张数最大的改良
improves[i] = improveWaits
}
//fmt.Println(fmt.Sprintf(" 摸 %s 切 %s 改良:", MahjongZH[i], MahjongZH[j]), improveWaitsCount, TilesToStrWithBracket(improveWaits.indexes()))
}
}
playerInfo.UndoDiscardTile(j, _isRedFive)
}
}
tiles34[i]--
leftTiles34[i]++
}
if waitsCount > 0 {
avgAgariRate /= float64(waitsCount)
if ronPointWeight > 0 {
avgRonPoint /= float64(ronPointWeight)
}
avgRiichiRonPoint /= float64(waitsCount)
if shanten13 == 0 {
avgAgariRate = CalculateAvgAgariRate(waits, playerInfo.DiscardTiles)
avgRonPoint = CalcAvgRonPoint(playerInfo, waits)
avgRiichiRonPoint = CalcAvgRiichiRonPoint(playerInfo, waits)
}
}
yakuTypes := []int{}
for yakuType, can := range canYaku {
if can {
yakuTypes = append(yakuTypes, yakuType)
}
}
if len(yakuTypes) == 0 {
// 无役,若能立直则立直
if shanten13 == 0 && !playerInfo.IsNaki() {
savedIsRiichi := playerInfo.IsRiichi
playerInfo.IsRiichi = true
defer func() { playerInfo.IsRiichi = savedIsRiichi }()
yakuTypes = append(yakuTypes, YakuRiichi)
}
}
_tiles34 := make([]int, 34)
copy(_tiles34, tiles34)
r = &WaitsWithImproves13{
Tiles34: _tiles34,
LeftTiles34: leftTiles34,
IsNaki: playerInfo.IsNaki(),
Shanten: shanten13,
Waits: waits,
NextShantenWaitsCountMap: nextShantenWaitsCountMap,
Improves: improves,
ImproveWayCount: improveWayCount,
AvgImproveWaitsCount: float64(waitsCount),
AvgAgariRate: avgAgariRate,
YakuTypes: yakuTypes,
DoraCount: playerInfo.CountDora(),
}
// 对于听牌及一向听,判断是否有振听可能
if shanten13 <= 1 {
for _, discardTile := range playerInfo.DiscardTiles {
if _, ok := waits[discardTile]; ok {
r.FuritenRate = 0.5 // TODO: 待完善
if shanten13 == 0 {
// 听牌时,若听的牌在舍牌中,则构成振听
r.FuritenRate = 1
// 修正振听时的和率
r.AvgAgariRate *= FuritenAgariMulti
}
}
}
}
// 非振听时计算荣和点数
if r.FuritenRate < 1 && shanten13 <= 1 {
r.RonPoint = avgRonPoint
if !playerInfo.IsNaki() {
r.RiichiRonPoint = avgRiichiRonPoint
}
}
// TODO: 自摸点数
// 分析
if len(nextShantenWaitsCountMap) > 0 {
nextShantenWaitsSum := 0
weight := 0
for tile, c := range nextShantenWaitsCountMap {
w := leftTiles34[tile]
nextShantenWaitsSum += w * c
weight += w
}
r.AvgNextShantenWaitsCount = float64(nextShantenWaitsSum) / float64(weight)
}
if len(improves) > 0 {
improveWaitsSum := 0
weight := 0
for i := 0; i < 34; i++ {
w := leftTiles34[i]
improveWaitsSum += w * maxImproveWaitsCount34[i]
weight += w
}
r.AvgImproveWaitsCount = float64(improveWaitsSum) / float64(weight)
}
r.MixedWaitsScore = r.mixedWaitsScore()
return
}
type WaitsWithImproves14 struct {
// The tile to discard
DiscardTile int
// Analysis result of the hand after the discard
Result13 *WaitsWithImproves13
// Meld information (nil if no call was made)
// e.g. if the call was a chi with 23m, OpenTiles is [1,2]
OpenTiles []int
}
func (r *WaitsWithImproves14) String() string {
meldInfo := ""
if len(r.OpenTiles) > 0 {
meldType := "吃"
if r.OpenTiles[0] == r.OpenTiles[1] {
meldType = "碰"
}
meldInfo = fmt.Sprintf("用 %s%s %s,", string([]rune(MahjongZH[r.OpenTiles[0]])[:1]), MahjongZH[r.OpenTiles[1]], meldType)
}
return meldInfo + fmt.Sprintf("切 %s: %s", MahjongZH[r.DiscardTile], r.Result13.String())
}
type WaitsWithImproves14List []*WaitsWithImproves14
// Sort; if needImprove is true, sort primarily by AvgImproveWaitsCount
func (l WaitsWithImproves14List) Sort(needImprove bool) {
sort.Slice(l, func(i, j int) bool {
ri, rj := l[i].Result13, l[j].Result13
// When tenpai, the win rate takes priority
// TODO: consider the score
if l[0].Result13.Shanten == 0 {
if !Equal(ri.AvgAgariRate, rj.AvgAgariRate) {
return ri.AvgAgariRate > rj.AvgAgariRate
}
}
if needImprove {
if !Equal(ri.AvgImproveWaitsCount, rj.AvgImproveWaitsCount) {
return ri.AvgImproveWaitsCount > rj.AvgImproveWaitsCount
}
}
// Sort order: mixed score - acceptance - post-advance acceptance - win rate - improvement - discard good tiles first
// Note that as the game goes on, acceptance tiles get discarded by others, so the acceptance tends to shrink
// At 1-shanten, tiles we will wait on may be discarded by others before we reach tenpai, lowering the wait count at tenpai, so the wait count matters more than the win rate
// Comparing the current acceptance with the post-advance acceptance: when their products are similar (note this precondition), a larger acceptance means reaching tenpai faster, so the wait count at tenpai stays closer to the expected one; hence more acceptance is better (again, only when the products are similar)
if !Equal(ri.MixedWaitsScore, rj.MixedWaitsScore) {
return ri.MixedWaitsScore > rj.MixedWaitsScore
}
riWaitsCount, rjWaitsCount := ri.Waits.AllCount(), rj.Waits.AllCount()
if riWaitsCount != rjWaitsCount {
return riWaitsCount > rjWaitsCount
}
if !Equal(ri.AvgNextShantenWaitsCount, rj.AvgNextShantenWaitsCount) {
return ri.AvgNextShantenWaitsCount > rj.AvgNextShantenWaitsCount
}
// shanten == 1
if !Equal(ri.AvgAgariRate, rj.AvgAgariRate) {
return ri.AvgAgariRate > rj.AvgAgariRate
}
if !Equal(ri.AvgImproveWaitsCount, rj.AvgImproveWaitsCount) {
return ri.AvgImproveWaitsCount > rj.AvgImproveWaitsCount
}
idxI, idxJ := l[i].DiscardTile, l[j].DiscardTile
if idxI >= 27 && idxJ >= 27 {
// TODO 场风不为自风时:下家风 > 对家风 > 上家风 > 场风
}
// 好牌先走
if idxI < 27 && idxJ < 27 {
idxI %= 9
if idxI > 4 {
idxI = 8 - idxI
}
idxJ %= 9
if idxJ > 4 {
idxJ = 8 - idxJ
}
return idxI > idxJ
}
return idxI < idxJ
//// 改良种类、方式多的优先
//if len(ri.Improves) != len(rj.Improves) {
// return len(ri.Improves) > len(rj.Improves)
//}
//if ri.ImproveWayCount != rj.ImproveWayCount {
// return ri.ImproveWayCount > rj.ImproveWayCount
//}
})
}
func (l *WaitsWithImproves14List) filterOutDiscard(cantDiscardTile int) {
newResults := WaitsWithImproves14List{}
for _, r := range *l {
if r.DiscardTile != cantDiscardTile {
newResults = append(newResults, r)
}
}
*l = newResults
}
func (l WaitsWithImproves14List) addOpenTile(openTiles []int) {
for _, r := range l {
r.OpenTiles = openTiles
}
}
// For a 2/5/8/11/14-tile hand, compute the shanten number, acceptance, improvements, shanten regressions, etc.
func CalculateShantenWithImproves14(playerInfo *model.PlayerInfo) (shanten int, waitsWithImproves WaitsWithImproves14List, incShantenResults WaitsWithImproves14List) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
shanten = CalculateShanten(tiles34)
for i := 0; i < 34; i++ {
if tiles34[i] == 0 {
continue
}
isRedFive := playerInfo.IsOnlyRedFive(i)
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
playerInfo.DiscardTile(i, isRedFive)
result13 := CalculateShantenWithImproves13(playerInfo)
// 记录切牌后的分析结果
r := &WaitsWithImproves14{
DiscardTile: i,
Result13: result13,
}
if result13.Shanten == shanten {
waitsWithImproves = append(waitsWithImproves, r)
} else {
// 向听倒退
incShantenResults = append(incShantenResults, r)
}
playerInfo.UndoDiscardTile(i, isRedFive)
}
needImprove := func(l []*WaitsWithImproves14) bool {
if len(l) == 0 {
return false
}
shanten := l[0].Result13.Shanten
// 一向听及以下进张优先,改良其次
if shanten <= 1 {
return false
}
maxWaitsCount := 0
for _, r := range l {
maxWaitsCount = MaxInt(maxWaitsCount, r.Result13.Waits.AllCount())
}
// 两向听及以上的七对子考虑改良
return maxWaitsCount <= 6*shanten+3
}
ni := needImprove(waitsWithImproves)
waitsWithImproves.Sort(ni)
ni = needImprove(incShantenResults)
incShantenResults.Sort(ni)
return
}
// Compute the minimum shanten number and the possible call (meld) combinations
func calculateMeldShanten(tiles34 []int, calledTile int, isRedFive bool, allowChi bool) (minShanten int, meldCombinations []model.Meld) {
// Whether a pon is possible
if tiles34[calledTile] >= 2 {
meldCombinations = append(meldCombinations, model.Meld{
MeldType: model.MeldTypePon,
Tiles: []int{calledTile, calledTile, calledTile},
SelfTiles: []int{calledTile, calledTile},
CalledTile: calledTile,
RedFiveFromOthers: isRedFive,
})
}
// Whether a chi is possible
if allowChi && calledTile < 27 {
checkChi := func(tileA, tileB int) {
if tiles34[tileA] > 0 && tiles34[tileB] > 0 {
_tiles := []int{tileA, tileB, calledTile}
sort.Ints(_tiles)
meldCombinations = append(meldCombinations, model.Meld{
MeldType: model.MeldTypeChi,
Tiles: _tiles,
SelfTiles: []int{tileA, tileB},
CalledTile: calledTile,
RedFiveFromOthers: isRedFive,
})
}
}
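// Example (added for clarity): if the called tile is 5m (t9 == 4), the three checks
// below try the combinations 3m+4m, 4m+6m and 6m+7m respectively.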
t9 := calledTile % 9
if t9 >= 2 {
checkChi(calledTile-2, calledTile-1)
}
if t9 >= 1 && t9 <= 7 {
checkChi(calledTile-1, calledTile+1)
}
if t9 <= 6 {
checkChi(calledTile+1, calledTile+2)
}
}
// Compute the minimum shanten over all call combinations
minShanten = 99
for _, c := range meldCombinations {
tiles34[c.SelfTiles[0]]--
tiles34[c.SelfTiles[1]]--
minShanten = MinInt(minShanten, CalculateShanten(tiles34))
tiles34[c.SelfTiles[0]]++
tiles34[c.SelfTiles[1]]++
}
return
}
// TODO: handling of call (meld) situations (to be refactored)
// When implementing, mind whether the tile discarded by the other player is counted in the remaining tiles
//if isOpen {
//if newShanten, combinations, shantens := calculateMeldShanten(tiles34, i, true); newShanten < shanten {
// // 向听前进了,说明鸣牌成功,则换的这张牌为鸣牌进张
// // 计算进张数:若能碰则 =剩余数*3,否则 =剩余数
// meldWaits[i] = leftTile - tiles34[i]
// for i, comb := range combinations {
// if comb[0] == comb[1] && shantens[i] == newShanten {
// meldWaits[i] *= 3
// break
// }
// }
//}
//}
// What-to-discard analysis when calling (melding) a tile
// calledTile: the tile discarded by another player that we try to call
// isRedFive: whether that tile is a red five
// allowChi: whether chi is allowed on that tile
func CalculateMeld(playerInfo *model.PlayerInfo, calledTile int, isRedFive bool, allowChi bool) (minShanten int, waitsWithImproves WaitsWithImproves14List, incShantenResults WaitsWithImproves14List) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
minShanten, meldCombinations := calculateMeldShanten(playerInfo.HandTiles34, calledTile, isRedFive, allowChi)
for _, c := range meldCombinations {
// 尝试鸣这张牌
playerInfo.AddMeld(c)
_shanten, _waitsWithImproves, _incShantenResults := CalculateShantenWithImproves14(playerInfo)
playerInfo.UndoAddMeld()
// Filter out genbutsu kuikae (immediately discarding the very tile that was called)
_waitsWithImproves.filterOutDiscard(calledTile)
_incShantenResults.filterOutDiscard(calledTile)
// Filter out suji kuikae (discarding the tile three away along the called sequence)
if c.MeldType == model.MeldTypeChi {
cannotDiscardTile := -1
if c.SelfTiles[0] < calledTile && c.SelfTiles[1] < calledTile && calledTile%9 >= 3 {
cannotDiscardTile = calledTile - 3
} else if c.SelfTiles[0] > calledTile && c.SelfTiles[1] > calledTile && calledTile%9 <= 5 {
cannotDiscardTile = calledTile + 3
}
if cannotDiscardTile != -1 {
_waitsWithImproves.filterOutDiscard(cannotDiscardTile)
_incShantenResults.filterOutDiscard(cannotDiscardTile)
}
}
// 添加副露信息,用于输出
_waitsWithImproves.addOpenTile(c.SelfTiles)
_incShantenResults.addOpenTile(c.SelfTiles)
// 整理副露结果
if _shanten == minShanten {
waitsWithImproves = append(waitsWithImproves, _waitsWithImproves...)
incShantenResults = append(incShantenResults, _incShantenResults...)
} else if _shanten == minShanten+1 {
incShantenResults = append(incShantenResults, _waitsWithImproves...)
}
}
waitsWithImproves.Sort(false)
incShantenResults.Sort(false)
return
}
| dx-2] = true
// needCheck34[idx-1] = true
// needC | conditional_block |
shanten_improve.go | package util
import (
"fmt"
"sort"
"math"
"github.com/EndlessCheng/mahjong-helper/util/model"
)
// map[improvement tile]acceptance (the one with the largest acceptance is chosen)
type Improves map[int]Waits
// Analysis result for a 1/4/7/10/13-tile hand
type WaitsWithImproves13 struct {
// Original hand
Tiles34 []int
// Remaining (unseen) tiles
LeftTiles34 []int
// Whether any call (naki) has been made
IsNaki bool
// Shanten number
Shanten int
// Acceptance (tiles that advance the shanten)
// Takes the remaining tile counts into account |
// map[acceptance tile]acceptance count after the shanten advances (the discard chosen here is the one that maximises the post-advance acceptance)
NextShantenWaitsCountMap map[int]int
// Weighted average of the acceptance count after the shanten advances
AvgNextShantenWaitsCount float64
// Score combining the current acceptance with the post-advance acceptance
MixedWaitsScore float64
// Improvement: drawing this tile does not advance the shanten but increases the acceptance
// len(Improves) is the number of distinct improvement tiles
Improves Improves
// Number of improvement cases, i.e. how many discard choices increase the acceptance
ImproveWayCount int
// Weighted average of the improved acceptance when no acceptance tile is drawn (tiles that are neither acceptance nor improvement count as Waits.AllCount())
// Only one turn of improvement is averaged here
// TODO: with improvements taken into account, how to compute the expected number of draws needed to advance the shanten?
AvgImproveWaitsCount float64
// Weighted average of the best win rate of the tenpai reached after the shanten advances
// If already tenpai, this is the win rate of the current hand
AvgAgariRate float64
// Probability of being furiten (at 1-shanten and tenpai)
FuritenRate float64
// Yaku types
YakuTypes []int
// Number of dora (hand + melds)
DoraCount int
// Expected ron score without riichi
RonPoint float64
// Expected ron score with riichi
RiichiRonPoint float64
// Expected tsumo score
TsumoPoint float64
// TODO: red five improvement reminder
}
// Score combining the acceptance and the post-advance acceptance
// Roughly approximated here as the probability of advancing the shanten twice
func (r *WaitsWithImproves13) mixedWaitsScore() float64 {
if r.Waits.AllCount() == 0 || r.AvgNextShantenWaitsCount == 0 {
return 0
}
leftCount := float64(CountOfTiles34(r.LeftTiles34))
p2 := float64(r.Waits.AllCount()) / leftCount
//p2 := r.AvgImproveWaitsCount / leftCount
p1 := r.AvgNextShantenWaitsCount / leftCount
//if r.AvgAgariRate > 0 {
// p1 = r.AvgAgariRate / 100
//}
p2_, p1_ := 1-p2, 1-p1
const leftTurns = 10.0 // math.Max(5.0, leftCount/4)
sumP2 := p2_ * (1 - math.Pow(p2_, leftTurns)) / p2
sumP1 := p1_ * (1 - math.Pow(p1_, leftTurns)) / p1
result := p2 * p1 * (sumP2 - sumP1) / (p2_ - p1_)
return result * 100
}
// For debugging
func (r *WaitsWithImproves13) String() string {
s := fmt.Sprintf("%d 进张 %s\n%.2f 改良进张 [%d(%d) 种]",
r.Waits.AllCount(),
//r.Waits.AllCount()+r.MeldWaits.AllCount(),
TilesToStrWithBracket(r.Waits.indexes()),
r.AvgImproveWaitsCount,
len(r.Improves),
r.ImproveWayCount,
)
if r.Shanten >= 1 {
mixedScore := r.MixedWaitsScore
//for i := 2; i <= r.Shanten; i++ {
// mixedScore /= 4
//}
s += fmt.Sprintf(" %.2f %s进张(%.2f 综合分)",
r.AvgNextShantenWaitsCount,
NumberToChineseShanten(r.Shanten-1),
mixedScore,
)
}
if r.Shanten >= 0 && r.Shanten <= 1 {
s += fmt.Sprintf("(%.2f%% 参考和率)", r.AvgAgariRate)
if r.FuritenRate > 0 {
if r.FuritenRate < 1 {
s += "[可能振听]"
} else {
s += "[振听]"
}
}
s += YakuTypesWithDoraToStr(r.YakuTypes, r.DoraCount)
}
if r.RonPoint > 0 {
s += fmt.Sprintf("[(默听)荣和%d]", int(math.Round(r.RonPoint)))
}
if r.RiichiRonPoint > 0 {
s += fmt.Sprintf("[立直荣和%d]", int(math.Round(r.RiichiRonPoint)))
}
if r.TsumoPoint > 0 {
s += fmt.Sprintf("[自摸%d]", int(math.Round(r.TsumoPoint)))
}
return s
}
// For a 1/4/7/10/13-tile hand, compute the shanten number and the acceptance (taking remaining tile counts into account)
func CalculateShantenAndWaits13(tiles34 []int, leftTiles34 []int) (shanten int, waits Waits) {
if len(leftTiles34) == 0 {
leftTiles34 = InitLeftTiles34WithTiles34(tiles34)
}
shanten = CalculateShanten(tiles34)
// Pruning: detect non-floating tiles; ignoring kokushi musou, such tiles can never advance the shanten (they may still improve the hand, but CalculateShantenAndWaits13 does not consider that)
// This optimisation improved performance by about 30%
//needCheck34 := make([]bool, 34)
//idx := -1
//for i := 0; i < 3; i++ {
// for j := 0; j < 9; j++ {
// idx++
// if tiles34[idx] == 0 {
// continue
// }
// if j == 0 {
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 1 {
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j < 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// needCheck34[idx+2] = true
// } else if j == 7 {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// needCheck34[idx+1] = true
// } else {
// needCheck34[idx-2] = true
// needCheck34[idx-1] = true
// needCheck34[idx] = true
// }
// }
//}
//for i := 27; i < 34; i++ {
// if tiles34[i] > 0 {
// needCheck34[i] = true
// }
//}
waits = Waits{}
for i := 0; i < 34; i++ {
//if !needCheck34[i] {
// continue
//}
if tiles34[i] == 4 {
// 无法摸到这张牌
continue
}
// 摸牌
tiles34[i]++
if newShanten := CalculateShanten(tiles34); newShanten < shanten {
// 向听前进了,则换的这张牌为进张,进张数即剩余枚数
// 有可能为 0,但这对于判断振听是有帮助的,所以记录
waits[i] = leftTiles34[i]
}
tiles34[i]--
}
return
}
// For a 1/4/7/10/13-tile hand, compute the shanten number, acceptance, improvements, etc. (taking remaining tile counts into account)
func CalculateShantenWithImproves13(playerInfo *model.PlayerInfo) (r *WaitsWithImproves13) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
leftTiles34 := playerInfo.LeftTiles34
shanten13, waits := CalculateShantenAndWaits13(tiles34, leftTiles34)
waitsCount := waits.AllCount()
nextShantenWaitsCountMap := map[int]int{} // map[进张牌]听多少张牌
improves := Improves{}
improveWayCount := 0
// 对于每张牌,摸到之后的手牌进张数(如果摸到的是 waits 中的牌,则进张数视作 waitsCount)
maxImproveWaitsCount34 := make([]int, 34)
for i := 0; i < 34; i++ {
maxImproveWaitsCount34[i] = waitsCount // 初始化成基本进张
}
avgAgariRate := 0.0
avgRonPoint := 0.0
ronPointWeight := 0
avgRiichiRonPoint := 0.0
canYaku := make([]bool, maxYakuType)
if len(playerInfo.Melds) == 0 && CountPairsOfTiles34(tiles34)+shanten13 == 6 {
// At 3-shanten, chiitoitsu is only considered when the acceptance is poor
if shanten13 == 3 {
if waitsCount <= 21 {
canYaku[YakuChiitoi] = true
}
} else if shanten13 == 1 || shanten13 == 2 {
// At 1-shanten and 2-shanten, chiitoitsu is considered
canYaku[YakuChiitoi] = true
}
}
fillYakuTypes := func(_shanten13 int, _waits Waits) {
if _shanten13 != 0 {
return
}
// 听牌
for tile, left := range _waits {
if left == 0 {
continue
}
tiles34[tile]++
playerInfo.WinTile = tile
_yakuTypes := FindAllYakuTypes(playerInfo)
for _, t := range _yakuTypes {
canYaku[t] = true
}
tiles34[tile]--
}
}
// 计算可能的役种
fillYakuTypes(shanten13, waits)
for i := 0; i < 34; i++ {
// 从剩余牌中摸牌
if leftTiles34[i] == 0 {
continue
}
leftTiles34[i]--
tiles34[i]++
if _, ok := waits[i]; ok { // 摸到的是进张
maxAgariRate := 0.0 // 摸到此进张后的和率
maxAvgRonPoint := 0.0 // 平均打点
maxAvgRiichiRonPoint := 0.0
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 向听前进才是正确的切牌
if newShanten13, newWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 < shanten13 {
// 切牌一般切进张最多的
if waitsCount := newWaits.AllCount(); waitsCount > nextShantenWaitsCountMap[i] {
nextShantenWaitsCountMap[i] = waitsCount
}
// 听牌了
if newShanten13 == 0 {
// 听牌一般切和率最高的,TODO: 除非打点更高,比如说听到 dora 上,或者有三色等
_agariRate := CalculateAvgAgariRate(newWaits, playerInfo.DiscardTiles)
if _agariRate >= maxAgariRate {
maxAgariRate = _agariRate
// 计算荣和点数
// TODO: 这里简化了,和率优先,需要更加精细的考量
// TODO: maxAvgRonPoint = CalcAvgRonPoint(playerInfo, newWaits)
// TODO: maxAvgRiichiRonPoint = CalcAvgRiichiRonPoint(playerInfo, newWaits)
}
// 计算可能的役种
fillYakuTypes(newShanten13, newWaits)
}
}
playerInfo.UndoDiscardTile(j, _isRedFive)
}
// 加权:进张牌的剩余枚数*和率
w := leftTiles34[i] + 1
avgAgariRate += maxAgariRate * float64(w)
if maxAvgRonPoint > 0 {
avgRonPoint += maxAvgRonPoint * float64(w)
ronPointWeight += w
}
//fmt.Println(i, maxAvgRiichiRonPoint)
avgRiichiRonPoint += maxAvgRiichiRonPoint * float64(w)
} else { // 摸到的不是进张,但可能有改良
for j := 0; j < 34; j++ {
if tiles34[j] == 0 || j == i {
continue
}
// 切牌,然后分析 3k+1 张牌下的手牌情况
// 若这张是5,在只有赤5的情况下才会切赤5(TODO: 考虑赤5骗37)
_isRedFive := playerInfo.IsOnlyRedFive(j)
playerInfo.DiscardTile(j, _isRedFive)
// 正确的切牌
if newShanten13, improveWaits := CalculateShantenAndWaits13(tiles34, leftTiles34); newShanten13 == shanten13 {
// 若进张数变多,则为改良
if improveWaitsCount := improveWaits.AllCount(); improveWaitsCount > waitsCount {
improveWayCount++
if improveWaitsCount > maxImproveWaitsCount34[i] {
maxImproveWaitsCount34[i] = improveWaitsCount
// improves 选的是进张数最大的改良
improves[i] = improveWaits
}
//fmt.Println(fmt.Sprintf(" 摸 %s 切 %s 改良:", MahjongZH[i], MahjongZH[j]), improveWaitsCount, TilesToStrWithBracket(improveWaits.indexes()))
}
}
playerInfo.UndoDiscardTile(j, _isRedFive)
}
}
tiles34[i]--
leftTiles34[i]++
}
if waitsCount > 0 {
avgAgariRate /= float64(waitsCount)
if ronPointWeight > 0 {
avgRonPoint /= float64(ronPointWeight)
}
avgRiichiRonPoint /= float64(waitsCount)
if shanten13 == 0 {
avgAgariRate = CalculateAvgAgariRate(waits, playerInfo.DiscardTiles)
avgRonPoint = CalcAvgRonPoint(playerInfo, waits)
avgRiichiRonPoint = CalcAvgRiichiRonPoint(playerInfo, waits)
}
}
yakuTypes := []int{}
for yakuType, can := range canYaku {
if can {
yakuTypes = append(yakuTypes, yakuType)
}
}
if len(yakuTypes) == 0 {
// 无役,若能立直则立直
if shanten13 == 0 && !playerInfo.IsNaki() {
savedIsRiichi := playerInfo.IsRiichi
playerInfo.IsRiichi = true
defer func() { playerInfo.IsRiichi = savedIsRiichi }()
yakuTypes = append(yakuTypes, YakuRiichi)
}
}
_tiles34 := make([]int, 34)
copy(_tiles34, tiles34)
r = &WaitsWithImproves13{
Tiles34: _tiles34,
LeftTiles34: leftTiles34,
IsNaki: playerInfo.IsNaki(),
Shanten: shanten13,
Waits: waits,
NextShantenWaitsCountMap: nextShantenWaitsCountMap,
Improves: improves,
ImproveWayCount: improveWayCount,
AvgImproveWaitsCount: float64(waitsCount),
AvgAgariRate: avgAgariRate,
YakuTypes: yakuTypes,
DoraCount: playerInfo.CountDora(),
}
// 对于听牌及一向听,判断是否有振听可能
if shanten13 <= 1 {
for _, discardTile := range playerInfo.DiscardTiles {
if _, ok := waits[discardTile]; ok {
r.FuritenRate = 0.5 // TODO: 待完善
if shanten13 == 0 {
// at tenpai, if a waited tile is among our own discards, the hand is furiten
r.FuritenRate = 1
// adjust the win rate for furiten
r.AvgAgariRate *= FuritenAgariMulti
}
}
}
}
// when not furiten, compute the ron score
if r.FuritenRate < 1 && shanten13 <= 1 {
r.RonPoint = avgRonPoint
if !playerInfo.IsNaki() {
r.RiichiRonPoint = avgRiichiRonPoint
}
}
// TODO: tsumo score
// analysis
if len(nextShantenWaitsCountMap) > 0 {
nextShantenWaitsSum := 0
weight := 0
for tile, c := range nextShantenWaitsCountMap {
w := leftTiles34[tile]
nextShantenWaitsSum += w * c
weight += w
}
r.AvgNextShantenWaitsCount = float64(nextShantenWaitsSum) / float64(weight)
}
if len(improves) > 0 {
improveWaitsSum := 0
weight := 0
for i := 0; i < 34; i++ {
w := leftTiles34[i]
improveWaitsSum += w * maxImproveWaitsCount34[i]
weight += w
}
r.AvgImproveWaitsCount = float64(improveWaitsSum) / float64(weight)
}
r.MixedWaitsScore = r.mixedWaitsScore()
return
}
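// Editor's note (not part of the original source): a minimal, hedged sketch of
// how a caller might read the fields computed above. It only uses exported
// fields of WaitsWithImproves13 and helpers already present in this package;
// the helper name debugPrintResult13 itself is hypothetical.
func debugPrintResult13(r *WaitsWithImproves13) {
	if r == nil {
		return
	}
	// how many tiles advance the hand, and which ones
	fmt.Printf("shanten=%d, waits=%d %s\n",
		r.Shanten, r.Waits.AllCount(), TilesToStrWithBracket(r.Waits.indexes()))
	// the combined speed score used when sorting the 14-tile results
	fmt.Printf("avg next-shanten waits=%.2f, mixed score=%.2f\n",
		r.AvgNextShantenWaitsCount, r.MixedWaitsScore)
	if r.Shanten <= 1 {
		fmt.Printf("estimated win rate=%.2f%%, furiten rate=%.2f\n",
			r.AvgAgariRate, r.FuritenRate)
	}
}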
type WaitsWithImproves14 struct {
// the tile to discard
DiscardTile int
// analysis result of the hand after the discard
Result13 *WaitsWithImproves13
// meld info (nil when no tile was called)
// e.g. after calling chi with 23m, OpenTiles is [1,2]
OpenTiles []int
}
func (r *WaitsWithImproves14) String() string {
meldInfo := ""
if len(r.OpenTiles) > 0 {
meldType := "吃"
if r.OpenTiles[0] == r.OpenTiles[1] {
meldType = "碰"
}
meldInfo = fmt.Sprintf("用 %s%s %s,", string([]rune(MahjongZH[r.OpenTiles[0]])[:1]), MahjongZH[r.OpenTiles[1]], meldType)
}
return meldInfo + fmt.Sprintf("切 %s: %s", MahjongZH[r.DiscardTile], r.Result13.String())
}
type WaitsWithImproves14List []*WaitsWithImproves14
// Sort sorts the list; when needImprove is true, AvgImproveWaitsCount is given priority
func (l WaitsWithImproves14List) Sort(needImprove bool) {
sort.Slice(l, func(i, j int) bool {
ri, rj := l[i].Result13, l[j].Result13
// at tenpai, win rate comes first
// TODO: consider hand value
if l[0].Result13.Shanten == 0 {
if !Equal(ri.AvgAgariRate, rj.AvgAgariRate) {
return ri.AvgAgariRate > rj.AvgAgariRate
}
}
if needImprove {
if !Equal(ri.AvgImproveWaitsCount, rj.AvgImproveWaitsCount) {
return ri.AvgImproveWaitsCount > rj.AvgImproveWaitsCount
}
}
// ordering: mixed score - advancing tiles - advancing tiles after advancing - win rate - improvement - better tiles discarded first
// note that as the game goes on, advancing tiles get discarded by the other players, so their count tends to shrink
// for 1-shanten, tiles we will wait on may already be gone before we reach tenpai, lowering the final wait count, so the wait count matters more than the win rate
// comparing the current advancing tiles with those after advancing: when the two products are close (note this premise), more advancing tiles means reaching tenpai faster, so the tenpai wait count stays closer to expectation; hence more advancing tiles is better (again, only when the products are close)
if !Equal(ri.MixedWaitsScore, rj.MixedWaitsScore) {
return ri.MixedWaitsScore > rj.MixedWaitsScore
}
riWaitsCount, rjWaitsCount := ri.Waits.AllCount(), rj.Waits.AllCount()
if riWaitsCount != rjWaitsCount {
return riWaitsCount > rjWaitsCount
}
if !Equal(ri.AvgNextShantenWaitsCount, rj.AvgNextShantenWaitsCount) {
return ri.AvgNextShantenWaitsCount > rj.AvgNextShantenWaitsCount
}
// shanten == 1
if !Equal(ri.AvgAgariRate, rj.AvgAgariRate) {
return ri.AvgAgariRate > rj.AvgAgariRate
}
if !Equal(ri.AvgImproveWaitsCount, rj.AvgImproveWaitsCount) {
return ri.AvgImproveWaitsCount > rj.AvgImproveWaitsCount
}
idxI, idxJ := l[i].DiscardTile, l[j].DiscardTile
if idxI >= 27 && idxJ >= 27 {
// TODO when the round wind is not the seat wind: shimocha's wind > toimen's wind > kamicha's wind > round wind
}
// discard the better tiles first (see the sketch after this function)
if idxI < 27 && idxJ < 27 {
idxI %= 9
if idxI > 4 {
idxI = 8 - idxI
}
idxJ %= 9
if idxJ > 4 {
idxJ = 8 - idxJ
}
return idxI > idxJ
}
return idxI < idxJ
//// prefer discards with more kinds and ways of improvement
//if len(ri.Improves) != len(rj.Improves) {
// return len(ri.Improves) > len(rj.Improves)
//}
//if ri.ImproveWayCount != rj.ImproveWayCount {
// return ri.ImproveWayCount > rj.ImproveWayCount
//}
})
}
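// Editor's note (not part of the original source): the final tie-break in Sort
// maps a number-tile index to a rank by its distance from the terminals
// (1/9 -> 0, 2/8 -> 1, ..., 5 -> 4) so that the more useful tiles go out first.
// A small standalone sketch of that mapping; the function name is hypothetical.
func numberTileRank(idx int) int {
	r := idx % 9
	if r > 4 {
		r = 8 - r
	}
	return r // e.g. 1m and 9m -> 0, 5m -> 4
}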
func (l *WaitsWithImproves14List) filterOutDiscard(cantDiscardTile int) {
newResults := WaitsWithImproves14List{}
for _, r := range *l {
if r.DiscardTile != cantDiscardTile {
newResults = append(newResults, r)
}
}
*l = newResults
}
func (l WaitsWithImproves14List) addOpenTile(openTiles []int) {
for _, r := range l {
r.OpenTiles = openTiles
}
}
// For 2/5/8/11/14 tiles: compute the shanten number, advancing tiles, improvements, shanten regressions, etc. (a usage sketch follows this function)
func CalculateShantenWithImproves14(playerInfo *model.PlayerInfo) (shanten int, waitsWithImproves WaitsWithImproves14List, incShantenResults WaitsWithImproves14List) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
tiles34 := playerInfo.HandTiles34
shanten = CalculateShanten(tiles34)
for i := 0; i < 34; i++ {
if tiles34[i] == 0 {
continue
}
isRedFive := playerInfo.IsOnlyRedFive(i)
// discard a tile, then analyse the hand at 3k+1 tiles
// if this tile is a 5, the red 5 is only discarded when it is the only 5 in hand (TODO: consider the red-5 3/7 bluff)
playerInfo.DiscardTile(i, isRedFive)
result13 := CalculateShantenWithImproves13(playerInfo)
// record the analysis result for this discard
r := &WaitsWithImproves14{
DiscardTile: i,
Result13: result13,
}
if result13.Shanten == shanten {
waitsWithImproves = append(waitsWithImproves, r)
} else {
// this discard increases the shanten number
incShantenResults = append(incShantenResults, r)
}
playerInfo.UndoDiscardTile(i, isRedFive)
}
needImprove := func(l []*WaitsWithImproves14) bool {
if len(l) == 0 {
return false
}
shanten := l[0].Result13.Shanten
// at 1-shanten or better, advancing tiles come first and improvement second
if shanten <= 1 {
return false
}
maxWaitsCount := 0
for _, r := range l {
maxWaitsCount = MaxInt(maxWaitsCount, r.Result13.Waits.AllCount())
}
// consider improvement for seven-pairs hands at 2-shanten or worse
return maxWaitsCount <= 6*shanten+3
}
ni := needImprove(waitsWithImproves)
waitsWithImproves.Sort(ni)
ni = needImprove(incShantenResults)
incShantenResults.Sort(ni)
return
}
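// Editor's note (not part of the original source): a hedged sketch of consuming
// the sorted result above. bestDiscard is a hypothetical helper, not part of the
// original API; it assumes the caller already holds a populated model.PlayerInfo.
func bestDiscard(playerInfo *model.PlayerInfo) (tile int, ok bool) {
	_, results, incShanten := CalculateShantenWithImproves14(playerInfo)
	if len(results) > 0 {
		return results[0].DiscardTile, true // results are already sorted best-first
	}
	if len(incShanten) > 0 {
		return incShanten[0].DiscardTile, true // only shanten-regressing discards remain
	}
	return 0, false
}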
// calculateMeldShanten computes the minimum shanten number and the possible call (meld) combinations (a worked example follows this function)
func calculateMeldShanten(tiles34 []int, calledTile int, isRedFive bool, allowChi bool) (minShanten int, meldCombinations []model.Meld) {
// can this tile be pon'd?
if tiles34[calledTile] >= 2 {
meldCombinations = append(meldCombinations, model.Meld{
MeldType: model.MeldTypePon,
Tiles: []int{calledTile, calledTile, calledTile},
SelfTiles: []int{calledTile, calledTile},
CalledTile: calledTile,
RedFiveFromOthers: isRedFive,
})
}
// can this tile be chi'd?
if allowChi && calledTile < 27 {
checkChi := func(tileA, tileB int) {
if tiles34[tileA] > 0 && tiles34[tileB] > 0 {
_tiles := []int{tileA, tileB, calledTile}
sort.Ints(_tiles)
meldCombinations = append(meldCombinations, model.Meld{
MeldType: model.MeldTypeChi,
Tiles: _tiles,
SelfTiles: []int{tileA, tileB},
CalledTile: calledTile,
RedFiveFromOthers: isRedFive,
})
}
}
t9 := calledTile % 9
if t9 >= 2 {
checkChi(calledTile-2, calledTile-1)
}
if t9 >= 1 && t9 <= 7 {
checkChi(calledTile-1, calledTile+1)
}
if t9 <= 6 {
checkChi(calledTile+1, calledTile+2)
}
}
// minimum shanten number over all call combinations
minShanten = 99
for _, c := range meldCombinations {
tiles34[c.SelfTiles[0]]--
tiles34[c.SelfTiles[1]]--
minShanten = MinInt(minShanten, CalculateShanten(tiles34))
tiles34[c.SelfTiles[0]]++
tiles34[c.SelfTiles[1]]++
}
return
}
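// Editor's note (not part of the original source): a worked example of the
// enumeration above for a concrete, hypothetical hand. With 4m5m6m7m in hand and
// a called 6m, two chi melds are found (4m5m+6m and 5m7m+6m; 6m7m+8m would need
// an 8m), and pon is impossible. The sketch only illustrates the enumeration.
func exampleMeldCombinations() {
	tiles34 := make([]int, 34)
	for _, t := range []int{3, 4, 5, 6} { // 4m 5m 6m 7m (0-indexed man tiles)
		tiles34[t]++
	}
	calledTile := 5 // 6m discarded by another player
	minShanten, combos := calculateMeldShanten(tiles34, calledTile, false, true)
	fmt.Println(minShanten, len(combos)) // combos lists the chi/pon melds found
}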
// TODO meld (call) handling (to be refactored)
// be careful whether the tile the other player discarded is counted in the remaining tiles
//if isOpen {
//if newShanten, combinations, shantens := calculateMeldShanten(tiles34, i, true); newShanten < shanten {
// // the shanten advanced, i.e. the call succeeds, so this tile is a call advancing tile
// // advancing-tile count: remaining*3 if pon is possible, otherwise just the remaining count
// meldWaits[i] = leftTile - tiles34[i]
// for i, comb := range combinations {
// if comb[0] == comb[1] && shantens[i] == newShanten {
// meldWaits[i] *= 3
// break
// }
// }
//}
//}
// CalculateMeld analyses what to discard after calling another player's tile (a usage sketch follows this function)
// calledTile: the tile discarded by another player that we try to call
// isRedFive: whether that tile is a red 5
// allowChi: whether chi is allowed
func CalculateMeld(playerInfo *model.PlayerInfo, calledTile int, isRedFive bool, allowChi bool) (minShanten int, waitsWithImproves WaitsWithImproves14List, incShantenResults WaitsWithImproves14List) {
if len(playerInfo.LeftTiles34) == 0 {
playerInfo.FillLeftTiles34()
}
minShanten, meldCombinations := calculateMeldShanten(playerInfo.HandTiles34, calledTile, isRedFive, allowChi)
for _, c := range meldCombinations {
// try calling this tile
playerInfo.AddMeld(c)
_shanten, _waitsWithImproves, _incShantenResults := CalculateShantenWithImproves14(playerInfo)
playerInfo.UndoAddMeld()
// drop kuikae (swap-calling) discards of the called tile itself
_waitsWithImproves.filterOutDiscard(calledTile)
_incShantenResults.filterOutDiscard(calledTile)
// drop suji kuikae (swap-calling) discards
if c.MeldType == model.MeldTypeChi {
cannotDiscardTile := -1
if c.SelfTiles[0] < calledTile && c.SelfTiles[1] < calledTile && calledTile%9 >= 3 {
cannotDiscardTile = calledTile - 3
} else if c.SelfTiles[0] > calledTile && c.SelfTiles[1] > calledTile && calledTile%9 <= 5 {
cannotDiscardTile = calledTile + 3
}
if cannotDiscardTile != -1 {
_waitsWithImproves.filterOutDiscard(cannotDiscardTile)
_incShantenResults.filterOutDiscard(cannotDiscardTile)
}
}
// attach the meld info for output
_waitsWithImproves.addOpenTile(c.SelfTiles)
_incShantenResults.addOpenTile(c.SelfTiles)
// merge the meld results
if _shanten == minShanten {
waitsWithImproves = append(waitsWithImproves, _waitsWithImproves...)
incShantenResults = append(incShantenResults, _incShantenResults...)
} else if _shanten == minShanten+1 {
incShantenResults = append(incShantenResults, _waitsWithImproves...)
}
}
waitsWithImproves.Sort(false)
incShantenResults.Sort(false)
return
}
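// Editor's note (not part of the original source): a hedged sketch of calling
// the meld analysis above when another player discards a tile. It assumes the
// caller already has a populated model.PlayerInfo; the function name is
// hypothetical.
func exampleReactToDiscard(playerInfo *model.PlayerInfo, calledTile int, isRedFive bool) {
	minShanten, results, incShanten := CalculateMeld(playerInfo, calledTile, isRedFive, true)
	fmt.Println("shanten after calling:", minShanten)
	for _, r := range results {
		fmt.Println(r.String()) // which tiles form the meld and what to discard
	}
	_ = incShanten // shanten-regressing options, usually only shown on demand
}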
pod_driver.go
/*
Copyright 2021 Juicedata Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"os"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"k8s.io/utils/mount"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/juicedata/juicefs-csi-driver/pkg/config"
podmount "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount"
"github.com/juicedata/juicefs-csi-driver/pkg/k8sclient"
"github.com/juicedata/juicefs-csi-driver/pkg/util"
)
const defaultCheckoutTimeout = 1 * time.Second
type PodDriver struct {
Client *k8sclient.K8sClient
handlers map[podStatus]podHandler
mit mountInfoTable
mount.SafeFormatAndMount
}
func NewPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
return newPodDriver(client, mounter)
}
func newPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
driver := &PodDriver{
Client: client,
handlers: map[podStatus]podHandler{},
SafeFormatAndMount: mounter,
}
driver.handlers[podReady] = driver.podReadyHandler
driver.handlers[podError] = driver.podErrorHandler
driver.handlers[podPending] = driver.podPendingHandler
driver.handlers[podDeleted] = driver.podDeletedHandler
return driver
}
type podHandler func(ctx context.Context, pod *corev1.Pod) error
type podStatus string
const (
podReady podStatus = "podReady"
podError podStatus = "podError"
podDeleted podStatus = "podDeleted"
podPending podStatus = "podPending"
)
func (p *PodDriver) SetMountInfo(mit mountInfoTable) {
p.mit = mit
}
func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) error {
// check the refs in the mount pod annotation first, and delete refs whose target pod no longer exists
err := p.checkAnnotations(ctx, current)
if err != nil {
return err
}
podStatus := p.getPodStatus(current)
if podStatus != podError && podStatus != podDeleted {
return p.handlers[podStatus](ctx, current)
}
// the resourceVersion seen by kubelet may differ from the apiserver's,
// so fetch the latest pod resourceVersion from the apiserver
pod, err := p.Client.GetPod(ctx, current.Name, current.Namespace)
if err != nil {
return err
}
// set mount pod status in mit again, maybe deleted
p.mit.setPodStatus(pod)
return p.handlers[p.getPodStatus(pod)](ctx, pod)
}
// getPodStatus get pod status
func (p *PodDriver) getPodStatus(pod *corev1.Pod) podStatus {
	// NOTE: the body of this function is elided in this dump; the following is a
	// hedged reconstruction based on the statuses defined above. The util helper
	// names (IsPodError, IsPodReady) are assumptions, not confirmed by this file.
	if pod == nil {
		return podError
	}
	if pod.DeletionTimestamp != nil {
		return podDeleted
	}
	if util.IsPodError(pod) {
		return podError
	}
	if util.IsPodReady(pod) {
		return podReady
	}
	return podPending
}
// checkAnnotations
// 1. check refs in mount pod annotation
// 2. delete ref that target pod is not found
func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {
// check each ref in the mount pod: does the corresponding target pod still exist?
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
delAnnotations := []string{}
var existTargets int
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
_, exists := p.mit.deletedPods[getPodUid(target)]
if !exists { // only a pod that is absent from the pod list can be treated as deleted
// target pod is deleted
delAnnotations = append(delAnnotations, k)
continue
}
existTargets++
}
}
if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" {
delAnnotations = append(delAnnotations, config.DeleteDelayAtKey)
}
if len(delAnnotations) != 0 {
// check mount pod reference key, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
if len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not patch pod"))
}
if err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {
return err
}
}
if existTargets == 0 && pod.DeletionTimestamp == nil {
var shouldDelay bool
shouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)
if err != nil {
return err
}
if !shouldDelay {
// check mount pod resourceVersion, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
// check mount pod reference key, if it is not none, return conflict
if len(util.GetAllRefKeys(*newPod)) != 0 {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not delete pod"))
}
// if there are no refs or after delay time, delete it
klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name)
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("Delete pod %s error: %v", pod.Name, err)
return err
}
// delete related secret
secretName := pod.Name + "-secret"
klog.V(6).Infof("delete related secret of pod: %s", secretName)
if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {
klog.V(5).Infof("Delete secret %s error: %v", secretName, err)
}
}
}
return nil
}
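// Editor's note (not part of the original source): a minimal sketch of the
// annotation bookkeeping checked in checkAnnotations above. Each target path is
// stored under its reference key, so membership can be tested as below; the
// helper name hasTargetRef is hypothetical, util.GetReferenceKey is the same
// helper used above.
func hasTargetRef(pod *corev1.Pod, target string) bool {
	if pod.Annotations == nil {
		return false
	}
	v, ok := pod.Annotations[util.GetReferenceKey(target)]
	return ok && v == target
}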
// podErrorHandler handles mount pod error status
func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait up to 1 min for the pod to be deleted
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err) {
break
}
if apierrors.IsNotFound(err) {
// umount mount point before recreate mount pod
err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
exist, _ := mount.PathExists(sourcePath)
if !exist {
return fmt.Errorf("%s not exist", sourcePath)
}
return nil
})
if err == nil {
klog.Infof("start to umount: %s", sourcePath)
util.UmountPath(ctx, sourcePath)
}
// create pod
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: annotation,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace)
_, err = p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("Create pod:%s err:%v", pod.Name, err)
}
return nil
}
klog.Errorf("Get pod err:%v", err)
return nil
}
// pod is created elsewhere
if po.Annotations == nil {
po.Annotations = make(map[string]string)
}
for k, v := range existTargets {
// add exist target in annotation
po.Annotations[k] = v
}
if err := util.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil {
klog.Errorf("Update pod %s %s error: %v", po.Name, po.Namespace, err)
}
return err
}
err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace)
klog.V(5).Infof(err.Error())
return err
}
// podPendingHandler handles mount pod that is pending
func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait up to 1 min for the pod to be deleted
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podReadyHandler handles mount pod that is ready
func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("[podReadyHandler] get nil pod")
return nil
}
if pod.Annotations == nil {
return nil
}
// get mount point
mntPath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
e := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
_, e := os.Stat(mntPath)
return e
})
if e != nil {
klog.Errorf("[podReadyHandler] stat mntPath: %s err: %v, don't do recovery", mntPath, e)
return nil
}
// recovery for each target
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
mi := p.mit.resolveTarget(target)
if mi == nil {
klog.Errorf("pod %s target %s resolve fail", pod.Name, target)
continue
}
p.recoverTarget(pod.Name, mntPath, mi.baseTarget, mi)
for _, ti := range mi.subPathTarget {
p.recoverTarget(pod.Name, mntPath, ti, mi)
}
}
}
return nil
}
// recoverTarget recovers target path
func (p *PodDriver) recoverTarget(podName, sourcePath string, ti *targetItem, mi *mountItem) {
switch ti.status {
case targetStatusNotExist:
klog.Errorf("pod %s target %s not exists, item count:%d", podName, ti.target, ti.count)
if ti.count > 0 {
// the target still exists in the /proc/self/mountinfo file
// refer to this case: the local target exists, but the source it was bind-mounted to has been deleted
// if the target is a pod subpath (volumeMount.subPath), this would make the user pod deletion fail, so we help kubelet umount it
if mi.podDeleted {
p.umountTarget(ti.target, ti.count)
}
}
case targetStatusMounted:
// normal, the most likely case
klog.V(6).Infof("pod %s target %s is normal mounted", podName, ti.target)
case targetStatusNotMount:
klog.V(5).Infof("pod %s target %s is not mounted", podName, ti.target)
case targetStatusCorrupt:
if ti.inconsistent {
// the source paths (found in /proc/self/mountinfo) that the target is bound to are inconsistent
// something unexpected has happened
klog.Errorf("pod %s target %s, source inconsistent", podName, ti.target)
break
}
if mi.podDeleted {
klog.V(6).Infof("pod %s target %s, user pod has been deleted, don't do recovery", podName, ti.target)
break
}
// without umountTarget, the mountinfo file would grow without bound
// if we umounted all the target items, `mountPropagation` would stop taking effect
p.umountTarget(ti.target, ti.count-1)
if ti.subpath != "" {
sourcePath += "/" + ti.subpath
_, err := os.Stat(sourcePath)
if err != nil {
klog.Errorf("pod %s target %s, stat volPath:%s err:%v, don't do recovery", podName, ti.target, sourcePath, err)
break
}
}
klog.V(5).Infof("pod %s target %s recover volPath:%s", podName, ti.target, sourcePath)
mountOption := []string{"bind"}
if err := p.Mount(sourcePath, ti.target, "none", mountOption); err != nil {
klog.Errorf("exec cmd: mount -o bind %s %s err:%v", sourcePath, ti.target, err)
}
case targetStatusUnexpect:
klog.Errorf("pod %s target %s reslove err:%v", podName, ti.target, ti.err)
}
}
// umountTarget umounts the target path the given number of times
func (p *PodDriver) umountTarget(target string, count int) {
for i := 0; i < count; i++ {
// ignore error
p.Unmount(target)
}
}
// CleanUpCache cleans up the cache once the mount pod has been deleted
func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) {
if pod.Annotations[config.CleanCache] != "true" {
return
}
uuid := pod.Annotations[config.JuiceFSUUID]
uniqueId := pod.Annotations[config.UniqueId]
if uuid == "" && uniqueId == "" {
// no necessary info, return
klog.Errorf("[CleanUpCache] Can't get uuid and uniqueId from pod %s annotation. skip cache clean.", pod.Name)
return
}
// wait for pod deleted.
isDeleted := false
getCtx, getCancel := context.WithTimeout(ctx, 3*time.Minute)
defer getCancel()
for {
if _, err := p.Client.GetPod(getCtx, pod.Name, pod.Namespace); err != nil {
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
klog.V(5).Infof("[CleanUpCache] Get pod %s error %v. Skip clean cache.", pod.Name, err)
return
}
time.Sleep(time.Microsecond * 500)
}
if !isDeleted {
klog.Errorf("[CleanUpCache] Mount pod %s not deleted in 3 min. Skip clean cache.", pod.Name)
return
}
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s in node %s", uniqueId, config.NodeName)
podMnt := podmount.NewPodMount(p.Client, p.SafeFormatAndMount)
cacheDirs := []string{}
for _, dir := range pod.Spec.Volumes {
if strings.HasPrefix(dir.Name, "cachedir-") && dir.HostPath != nil {
cacheDirs = append(cacheDirs, dir.HostPath.Path)
}
}
image := pod.Spec.Containers[0].Image
if err := podMnt.CleanCache(ctx, image, uuid, uniqueId, cacheDirs); err != nil {
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s error %v", uniqueId, err)
}
}
| {
if pod == nil {
return podError
}
if pod.DeletionTimestamp != nil {
return podDeleted
}
if util.IsPodError(pod) {
return podError
}
if util.IsPodReady(pod) {
return podReady
}
return podPending
} | identifier_body |
pod_driver.go | /*
Copyright 2021 Juicedata Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"os"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"k8s.io/utils/mount"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/juicedata/juicefs-csi-driver/pkg/config"
podmount "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount"
"github.com/juicedata/juicefs-csi-driver/pkg/k8sclient"
"github.com/juicedata/juicefs-csi-driver/pkg/util"
)
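// defaultCheckoutTimeout bounds the filesystem probes (stat / path-exists / mount-point cleanup) run through util.DoWithTimeout.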
const defaultCheckoutTimeout = 1 * time.Second
type PodDriver struct {
Client *k8sclient.K8sClient
handlers map[podStatus]podHandler
mit mountInfoTable
mount.SafeFormatAndMount
}
func NewPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
return newPodDriver(client, mounter)
}
func newPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
driver := &PodDriver{
Client: client,
handlers: map[podStatus]podHandler{},
SafeFormatAndMount: mounter,
}
driver.handlers[podReady] = driver.podReadyHandler
driver.handlers[podError] = driver.podErrorHandler
driver.handlers[podPending] = driver.podPendingHandler
driver.handlers[podDeleted] = driver.podDeletedHandler
return driver
}
type podHandler func(ctx context.Context, pod *corev1.Pod) error
type podStatus string
const (
podReady podStatus = "podReady"
podError podStatus = "podError"
podDeleted podStatus = "podDeleted"
podPending podStatus = "podPending"
)
func (p *PodDriver) SetMountInfo(mit mountInfoTable) {
p.mit = mit
}
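// Run checks the mount pod's annotation refs and then dispatches the pod to the handler registered for its current status.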
func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) error {
// check refs in mount pod annotation first, delete ref that target pod is not found
err := p.checkAnnotations(ctx, current)
if err != nil {
return err
}
podStatus := p.getPodStatus(current)
if podStatus != podError && podStatus != podDeleted {
return p.handlers[podStatus](ctx, current)
}
// resourceVersion of kubelet may be different from apiserver
// so we need get latest pod resourceVersion from apiserver
pod, err := p.Client.GetPod(ctx, current.Name, current.Namespace)
if err != nil {
return err
}
// set mount pod status in mit again, maybe deleted
p.mit.setPodStatus(pod)
return p.handlers[p.getPodStatus(pod)](ctx, pod)
}
// getPodStatus get pod status
func (p *PodDriver) getPodStatus(pod *corev1.Pod) podStatus {
if pod == nil {
return podError
}
if pod.DeletionTimestamp != nil {
return podDeleted
}
if util.IsPodError(pod) {
return podError
}
if util.IsPodReady(pod) {
return podReady
}
return podPending
}
// checkAnnotations
// 1. check refs in mount pod annotation
// 2. delete ref that target pod is not found
func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {
// check refs in mount pod, the corresponding pod exists or not
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
delAnnotations := []string{}
var existTargets int
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
_, exists := p.mit.deletedPods[getPodUid(target)]
if !exists { // only a pod missing from the pod list is treated as deleted
// target pod is deleted
delAnnotations = append(delAnnotations, k)
continue
}
existTargets++
}
}
if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" {
delAnnotations = append(delAnnotations, config.DeleteDelayAtKey)
}
if len(delAnnotations) != 0 {
// check mount pod reference key, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
if len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not patch pod"))
}
if err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {
return err
}
}
if existTargets == 0 && pod.DeletionTimestamp == nil {
var shouldDelay bool
shouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)
if err != nil {
return err
}
if !shouldDelay {
// check mount pod resourceVersion, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
// check mount pod reference key, if it is not none, return conflict
if len(util.GetAllRefKeys(*newPod)) != 0 {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not delete pod"))
}
// if there are no refs or after delay time, delete it
klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name)
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("Delete pod %s error: %v", pod.Name, err)
return err
}
// delete related secret
secretName := pod.Name + "-secret"
klog.V(6).Infof("delete related secret of pod: %s", secretName)
if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {
klog.V(5).Infof("Delete secret %s error: %v", secretName, err)
}
}
}
return nil
}
// podErrorHandler handles mount pod error status
func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err) {
break
}
if apierrors.IsNotFound(err) {
// umount mount point before recreate mount pod
err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
exist, _ := mount.PathExists(sourcePath)
if !exist {
return fmt.Errorf("%s not exist", sourcePath)
}
return nil
})
if err == nil {
klog.Infof("start to umount: %s", sourcePath)
util.UmountPath(ctx, sourcePath)
}
// create pod
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: annotation,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace)
_, err = p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("Create pod:%s err:%v", pod.Name, err)
}
return nil
}
klog.Errorf("Get pod err:%v", err)
return nil
}
// pod is created elsewhere
if po.Annotations == nil {
po.Annotations = make(map[string]string)
}
for k, v := range existTargets {
// add exist target in annotation
po.Annotations[k] = v
}
if err := util.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil {
klog.Errorf("Update pod %s %s error: %v", po.Name, po.Namespace, err)
}
return err
}
err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace)
klog.V(5).Infof(err.Error())
return err
}
// podPendingHandler handles mount pod that is pending
func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podReadyHandler handles mount pod that is ready
func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("[podReadyHandler] get nil pod")
return nil
}
if pod.Annotations == nil {
return nil
}
// get mount point
mntPath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
e := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
_, e := os.Stat(mntPath)
return e
})
if e != nil {
klog.Errorf("[podReadyHandler] stat mntPath: %s err: %v, don't do recovery", mntPath, e)
return nil
}
// recovery for each target
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
mi := p.mit.resolveTarget(target)
if mi == nil {
klog.Errorf("pod %s target %s resolve fail", pod.Name, target)
continue
}
p.recoverTarget(pod.Name, mntPath, mi.baseTarget, mi)
for _, ti := range mi.subPathTarget {
p.recoverTarget(pod.Name, mntPath, ti, mi) | return nil
}
// recoverTarget recovers target path
func (p *PodDriver) recoverTarget(podName, sourcePath string, ti *targetItem, mi *mountItem) {
switch ti.status {
case targetStatusNotExist:
klog.Errorf("pod %s target %s not exists, item count:%d", podName, ti.target, ti.count)
if ti.count > 0 {
// the target still exists in the /proc/self/mountinfo file
// refer to this case: the local target exists, but the source it was bind-mounted to has been deleted
// if the target is a pod subpath (volumeMount.subPath), this would make the user pod deletion fail, so we help kubelet umount it
if mi.podDeleted {
p.umountTarget(ti.target, ti.count)
}
}
case targetStatusMounted:
// normal, the most likely case
klog.V(6).Infof("pod %s target %s is normal mounted", podName, ti.target)
case targetStatusNotMount:
klog.V(5).Infof("pod %s target %s is not mounted", podName, ti.target)
case targetStatusCorrupt:
if ti.inconsistent {
// the source paths (found in /proc/self/mountinfo) that the target is bound to are inconsistent
// something unexpected has happened
klog.Errorf("pod %s target %s, source inconsistent", podName, ti.target)
break
}
if mi.podDeleted {
klog.V(6).Infof("pod %s target %s, user pod has been deleted, don't do recovery", podName, ti.target)
break
}
// without umountTarget, the mountinfo file would grow without bound
// if we umounted all the target items, `mountPropagation` would stop taking effect
p.umountTarget(ti.target, ti.count-1)
if ti.subpath != "" {
sourcePath += "/" + ti.subpath
_, err := os.Stat(sourcePath)
if err != nil {
klog.Errorf("pod %s target %s, stat volPath:%s err:%v, don't do recovery", podName, ti.target, sourcePath, err)
break
}
}
klog.V(5).Infof("pod %s target %s recover volPath:%s", podName, ti.target, sourcePath)
mountOption := []string{"bind"}
if err := p.Mount(sourcePath, ti.target, "none", mountOption); err != nil {
klog.Errorf("exec cmd: mount -o bind %s %s err:%v", sourcePath, ti.target, err)
}
case targetStatusUnexpect:
klog.Errorf("pod %s target %s reslove err:%v", podName, ti.target, ti.err)
}
}
// umountTarget umounts the target path the given number of times
func (p *PodDriver) umountTarget(target string, count int) {
for i := 0; i < count; i++ {
// ignore error
p.Unmount(target)
}
}
// CleanUpCache cleans up the cache once the mount pod has been deleted
func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) {
if pod.Annotations[config.CleanCache] != "true" {
return
}
uuid := pod.Annotations[config.JuiceFSUUID]
uniqueId := pod.Annotations[config.UniqueId]
if uuid == "" && uniqueId == "" {
// no necessary info, return
klog.Errorf("[CleanUpCache] Can't get uuid and uniqueId from pod %s annotation. skip cache clean.", pod.Name)
return
}
// wait for pod deleted.
isDeleted := false
getCtx, getCancel := context.WithTimeout(ctx, 3*time.Minute)
defer getCancel()
for {
if _, err := p.Client.GetPod(getCtx, pod.Name, pod.Namespace); err != nil {
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
klog.V(5).Infof("[CleanUpCache] Get pod %s error %v. Skip clean cache.", pod.Name, err)
return
}
time.Sleep(time.Microsecond * 500)
}
if !isDeleted {
klog.Errorf("[CleanUpCache] Mount pod %s not deleted in 3 min. Skip clean cache.", pod.Name)
return
}
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s in node %s", uniqueId, config.NodeName)
podMnt := podmount.NewPodMount(p.Client, p.SafeFormatAndMount)
cacheDirs := []string{}
for _, dir := range pod.Spec.Volumes {
if strings.HasPrefix(dir.Name, "cachedir-") && dir.HostPath != nil {
cacheDirs = append(cacheDirs, dir.HostPath.Path)
}
}
image := pod.Spec.Containers[0].Image
if err := podMnt.CleanCache(ctx, image, uuid, uniqueId, cacheDirs); err != nil {
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s error %v", uniqueId, err)
}
} | }
}
}
| random_line_split |
pod_driver.go | /*
Copyright 2021 Juicedata Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"os"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"k8s.io/utils/mount"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/juicedata/juicefs-csi-driver/pkg/config"
podmount "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount"
"github.com/juicedata/juicefs-csi-driver/pkg/k8sclient"
"github.com/juicedata/juicefs-csi-driver/pkg/util"
)
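// defaultCheckoutTimeout bounds the filesystem probes (stat / path-exists / mount-point cleanup) run through util.DoWithTimeout.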
const defaultCheckoutTimeout = 1 * time.Second
type PodDriver struct {
Client *k8sclient.K8sClient
handlers map[podStatus]podHandler
mit mountInfoTable
mount.SafeFormatAndMount
}
func NewPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
return newPodDriver(client, mounter)
}
func | (client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
driver := &PodDriver{
Client: client,
handlers: map[podStatus]podHandler{},
SafeFormatAndMount: mounter,
}
driver.handlers[podReady] = driver.podReadyHandler
driver.handlers[podError] = driver.podErrorHandler
driver.handlers[podPending] = driver.podPendingHandler
driver.handlers[podDeleted] = driver.podDeletedHandler
return driver
}
type podHandler func(ctx context.Context, pod *corev1.Pod) error
type podStatus string
const (
podReady podStatus = "podReady"
podError podStatus = "podError"
podDeleted podStatus = "podDeleted"
podPending podStatus = "podPending"
)
func (p *PodDriver) SetMountInfo(mit mountInfoTable) {
p.mit = mit
}
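// Run checks the mount pod's annotation refs and then dispatches the pod to the handler registered for its current status.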
func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) error {
// check refs in mount pod annotation first, delete ref that target pod is not found
err := p.checkAnnotations(ctx, current)
if err != nil {
return err
}
podStatus := p.getPodStatus(current)
if podStatus != podError && podStatus != podDeleted {
return p.handlers[podStatus](ctx, current)
}
// resourceVersion of kubelet may be different from apiserver
// so we need get latest pod resourceVersion from apiserver
pod, err := p.Client.GetPod(ctx, current.Name, current.Namespace)
if err != nil {
return err
}
// set mount pod status in mit again, maybe deleted
p.mit.setPodStatus(pod)
return p.handlers[p.getPodStatus(pod)](ctx, pod)
}
// getPodStatus get pod status
func (p *PodDriver) getPodStatus(pod *corev1.Pod) podStatus {
if pod == nil {
return podError
}
if pod.DeletionTimestamp != nil {
return podDeleted
}
if util.IsPodError(pod) {
return podError
}
if util.IsPodReady(pod) {
return podReady
}
return podPending
}
// checkAnnotations
// 1. check refs in mount pod annotation
// 2. delete ref that target pod is not found
func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {
// check refs in mount pod, the corresponding pod exists or not
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
delAnnotations := []string{}
var existTargets int
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
_, exists := p.mit.deletedPods[getPodUid(target)]
if !exists { // only a pod missing from the pod list is treated as deleted
// target pod is deleted
delAnnotations = append(delAnnotations, k)
continue
}
existTargets++
}
}
if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" {
delAnnotations = append(delAnnotations, config.DeleteDelayAtKey)
}
if len(delAnnotations) != 0 {
// check mount pod reference key, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
if len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not patch pod"))
}
if err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {
return err
}
}
if existTargets == 0 && pod.DeletionTimestamp == nil {
var shouldDelay bool
shouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)
if err != nil {
return err
}
if !shouldDelay {
// check mount pod resourceVersion, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
// check mount pod reference key, if it is not none, return conflict
if len(util.GetAllRefKeys(*newPod)) != 0 {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not delete pod"))
}
// if there are no refs or after delay time, delete it
klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name)
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("Delete pod %s error: %v", pod.Name, err)
return err
}
// delete related secret
secretName := pod.Name + "-secret"
klog.V(6).Infof("delete related secret of pod: %s", secretName)
if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {
klog.V(5).Infof("Delete secret %s error: %v", secretName, err)
}
}
}
return nil
}
// podErrorHandler handles mount pod error status
func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err) {
break
}
if apierrors.IsNotFound(err) {
// umount mount point before recreate mount pod
err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
exist, _ := mount.PathExists(sourcePath)
if !exist {
return fmt.Errorf("%s not exist", sourcePath)
}
return nil
})
if err == nil {
klog.Infof("start to umount: %s", sourcePath)
util.UmountPath(ctx, sourcePath)
}
// create pod
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: annotation,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace)
_, err = p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("Create pod:%s err:%v", pod.Name, err)
}
return nil
}
klog.Errorf("Get pod err:%v", err)
return nil
}
// pod is created elsewhere
if po.Annotations == nil {
po.Annotations = make(map[string]string)
}
for k, v := range existTargets {
// add exist target in annotation
po.Annotations[k] = v
}
if err := util.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil {
klog.Errorf("Update pod %s %s error: %v", po.Name, po.Namespace, err)
}
return err
}
err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace)
klog.V(5).Infof(err.Error())
return err
}
// podPendingHandler handles mount pod that is pending
func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podReadyHandler handles mount pod that is ready
func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("[podReadyHandler] get nil pod")
return nil
}
if pod.Annotations == nil {
return nil
}
// get mount point
mntPath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
e := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
_, e := os.Stat(mntPath)
return e
})
if e != nil {
klog.Errorf("[podReadyHandler] stat mntPath: %s err: %v, don't do recovery", mntPath, e)
return nil
}
// recovery for each target
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
mi := p.mit.resolveTarget(target)
if mi == nil {
klog.Errorf("pod %s target %s resolve fail", pod.Name, target)
continue
}
p.recoverTarget(pod.Name, mntPath, mi.baseTarget, mi)
for _, ti := range mi.subPathTarget {
p.recoverTarget(pod.Name, mntPath, ti, mi)
}
}
}
return nil
}
// recoverTarget recovers target path
func (p *PodDriver) recoverTarget(podName, sourcePath string, ti *targetItem, mi *mountItem) {
switch ti.status {
case targetStatusNotExist:
klog.Errorf("pod %s target %s not exists, item count:%d", podName, ti.target, ti.count)
if ti.count > 0 {
// the target still exists in the /proc/self/mountinfo file
// refer to this case: the local target exists, but the source it was bind-mounted to has been deleted
// if the target is a pod subpath (volumeMount.subPath), this would make the user pod deletion fail, so we help kubelet umount it
if mi.podDeleted {
p.umountTarget(ti.target, ti.count)
}
}
case targetStatusMounted:
// normal, the most likely case
klog.V(6).Infof("pod %s target %s is normal mounted", podName, ti.target)
case targetStatusNotMount:
klog.V(5).Infof("pod %s target %s is not mounted", podName, ti.target)
case targetStatusCorrupt:
if ti.inconsistent {
// the source paths (found in /proc/self/mountinfo) that the target is bound to are inconsistent
// something unexpected has happened
klog.Errorf("pod %s target %s, source inconsistent", podName, ti.target)
break
}
if mi.podDeleted {
klog.V(6).Infof("pod %s target %s, user pod has been deleted, don't do recovery", podName, ti.target)
break
}
// without umountTarget, the mountinfo file would grow without bound
// if we umounted all the target items, `mountPropagation` would stop taking effect
p.umountTarget(ti.target, ti.count-1)
if ti.subpath != "" {
sourcePath += "/" + ti.subpath
_, err := os.Stat(sourcePath)
if err != nil {
klog.Errorf("pod %s target %s, stat volPath:%s err:%v, don't do recovery", podName, ti.target, sourcePath, err)
break
}
}
klog.V(5).Infof("pod %s target %s recover volPath:%s", podName, ti.target, sourcePath)
mountOption := []string{"bind"}
if err := p.Mount(sourcePath, ti.target, "none", mountOption); err != nil {
klog.Errorf("exec cmd: mount -o bind %s %s err:%v", sourcePath, ti.target, err)
}
case targetStatusUnexpect:
klog.Errorf("pod %s target %s reslove err:%v", podName, ti.target, ti.err)
}
}
// umountTarget umounts the target path the given number of times
func (p *PodDriver) umountTarget(target string, count int) {
for i := 0; i < count; i++ {
// ignore error
p.Unmount(target)
}
}
// CleanUpCache cleans up the cache once the mount pod has been deleted
func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) {
if pod.Annotations[config.CleanCache] != "true" {
return
}
uuid := pod.Annotations[config.JuiceFSUUID]
uniqueId := pod.Annotations[config.UniqueId]
if uuid == "" && uniqueId == "" {
// no necessary info, return
klog.Errorf("[CleanUpCache] Can't get uuid and uniqueId from pod %s annotation. skip cache clean.", pod.Name)
return
}
// wait for pod deleted.
isDeleted := false
getCtx, getCancel := context.WithTimeout(ctx, 3*time.Minute)
defer getCancel()
for {
if _, err := p.Client.GetPod(getCtx, pod.Name, pod.Namespace); err != nil {
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
klog.V(5).Infof("[CleanUpCache] Get pod %s error %v. Skip clean cache.", pod.Name, err)
return
}
time.Sleep(time.Microsecond * 500)
}
if !isDeleted {
klog.Errorf("[CleanUpCache] Mount pod %s not deleted in 3 min. Skip clean cache.", pod.Name)
return
}
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s in node %s", uniqueId, config.NodeName)
podMnt := podmount.NewPodMount(p.Client, p.SafeFormatAndMount)
cacheDirs := []string{}
for _, dir := range pod.Spec.Volumes {
if strings.HasPrefix(dir.Name, "cachedir-") && dir.HostPath != nil {
cacheDirs = append(cacheDirs, dir.HostPath.Path)
}
}
image := pod.Spec.Containers[0].Image
if err := podMnt.CleanCache(ctx, image, uuid, uniqueId, cacheDirs); err != nil {
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s error %v", uniqueId, err)
}
}
| newPodDriver | identifier_name |
pod_driver.go | /*
Copyright 2021 Juicedata Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"os"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"k8s.io/utils/mount"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/juicedata/juicefs-csi-driver/pkg/config"
podmount "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount"
"github.com/juicedata/juicefs-csi-driver/pkg/k8sclient"
"github.com/juicedata/juicefs-csi-driver/pkg/util"
)
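// defaultCheckoutTimeout bounds the filesystem probes (stat / path-exists / mount-point cleanup) run through util.DoWithTimeout.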
const defaultCheckoutTimeout = 1 * time.Second
type PodDriver struct {
Client *k8sclient.K8sClient
handlers map[podStatus]podHandler
mit mountInfoTable
mount.SafeFormatAndMount
}
func NewPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
return newPodDriver(client, mounter)
}
func newPodDriver(client *k8sclient.K8sClient, mounter mount.SafeFormatAndMount) *PodDriver {
driver := &PodDriver{
Client: client,
handlers: map[podStatus]podHandler{},
SafeFormatAndMount: mounter,
}
driver.handlers[podReady] = driver.podReadyHandler
driver.handlers[podError] = driver.podErrorHandler
driver.handlers[podPending] = driver.podPendingHandler
driver.handlers[podDeleted] = driver.podDeletedHandler
return driver
}
type podHandler func(ctx context.Context, pod *corev1.Pod) error
type podStatus string
const (
podReady podStatus = "podReady"
podError podStatus = "podError"
podDeleted podStatus = "podDeleted"
podPending podStatus = "podPending"
)
func (p *PodDriver) SetMountInfo(mit mountInfoTable) {
p.mit = mit
}
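// Run checks the mount pod's annotation refs and then dispatches the pod to the handler registered for its current status.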
func (p *PodDriver) Run(ctx context.Context, current *corev1.Pod) error {
// check refs in mount pod annotation first, delete ref that target pod is not found
err := p.checkAnnotations(ctx, current)
if err != nil {
return err
}
podStatus := p.getPodStatus(current)
if podStatus != podError && podStatus != podDeleted {
return p.handlers[podStatus](ctx, current)
}
// resourceVersion of kubelet may be different from apiserver
// so we need get latest pod resourceVersion from apiserver
pod, err := p.Client.GetPod(ctx, current.Name, current.Namespace)
if err != nil {
return err
}
// set mount pod status in mit again, maybe deleted
p.mit.setPodStatus(pod)
return p.handlers[p.getPodStatus(pod)](ctx, pod)
}
// getPodStatus get pod status
func (p *PodDriver) getPodStatus(pod *corev1.Pod) podStatus {
if pod == nil {
return podError
}
if pod.DeletionTimestamp != nil {
return podDeleted
}
if util.IsPodError(pod) {
return podError
}
if util.IsPodReady(pod) {
return podReady
}
return podPending
}
// checkAnnotations
// 1. check refs in mount pod annotation
// 2. delete ref that target pod is not found
func (p *PodDriver) checkAnnotations(ctx context.Context, pod *corev1.Pod) error {
// check refs in mount pod, the corresponding pod exists or not
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
delAnnotations := []string{}
var existTargets int
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) {
_, exists := p.mit.deletedPods[getPodUid(target)]
if !exists { // only a pod missing from the pod list is treated as deleted
// target pod is deleted
delAnnotations = append(delAnnotations, k)
continue
}
existTargets++
}
}
if existTargets != 0 && pod.Annotations[config.DeleteDelayAtKey] != "" {
delAnnotations = append(delAnnotations, config.DeleteDelayAtKey)
}
if len(delAnnotations) != 0 {
// check mount pod reference key, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
if len(util.GetAllRefKeys(*newPod)) != len(util.GetAllRefKeys(*pod)) {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not patch pod"))
}
if err := util.DelPodAnnotation(ctx, p.Client, pod, delAnnotations); err != nil {
return err
}
}
if existTargets == 0 && pod.DeletionTimestamp == nil {
var shouldDelay bool
shouldDelay, err := util.ShouldDelay(ctx, pod, p.Client)
if err != nil {
return err
}
if !shouldDelay {
// check mount pod resourceVersion, if it is not the latest, return conflict
newPod, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err != nil {
return err
}
// check mount pod reference key, if it is not none, return conflict
if len(util.GetAllRefKeys(*newPod)) != 0 {
return apierrors.NewConflict(schema.GroupResource{
Group: pod.GroupVersionKind().Group,
Resource: pod.GroupVersionKind().Kind,
}, pod.Name, fmt.Errorf("can not delete pod"))
}
// if there are no refs or after delay time, delete it
klog.V(5).Infof("There are no refs in pod %s annotation, delete it", pod.Name)
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("Delete pod %s error: %v", pod.Name, err)
return err
}
// delete related secret
secretName := pod.Name + "-secret"
klog.V(6).Infof("delete related secret of pod: %s", secretName)
if err := p.Client.DeleteSecret(ctx, secretName, pod.Namespace); err != nil {
klog.V(5).Infof("Delete secret %s error: %v", secretName, err)
}
}
}
return nil
}
// podErrorHandler handles mount pod error status
func (p *PodDriver) podErrorHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podDeletedHandler handles mount pod that will be deleted
func (p *PodDriver) podDeletedHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("get nil pod")
return nil
}
klog.V(5).Infof("Pod %s in namespace %s is to be deleted.", pod.Name, pod.Namespace)
// pod with no finalizer
if !util.ContainsString(pod.GetFinalizers(), config.Finalizer) {
// do nothing
return nil
}
// remove finalizer of pod
if err := util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer); err != nil {
klog.Errorf("remove pod finalizer err:%v", err)
return err
}
// pod with resource error
if util.IsPodResourceError(pod) {
klog.V(6).Infof("The pod is PodResourceError, podDeletedHandler skip delete the pod:%s", pod.Name)
return nil
}
// get mount point
sourcePath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
// check if it needs to create new one
klog.V(6).Infof("Annotations:%v", pod.Annotations)
if pod.Annotations == nil {
return nil
}
annotation := pod.Annotations
existTargets := make(map[string]string)
for k, v := range pod.Annotations {
// annotation is checked in beginning, don't double-check here
if k == util.GetReferenceKey(v) {
existTargets[k] = v
}
}
if len(existTargets) == 0 {
// do not need to create new one, umount
util.UmountPath(ctx, sourcePath)
// clean mount point
err = util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
klog.V(5).Infof("Clean mount point : %s", sourcePath)
return mount.CleanupMountPoint(sourcePath, p.SafeFormatAndMount.Interface, false)
})
if err != nil {
klog.Errorf("Clean mount point %s error: %v", sourcePath, err)
}
// cleanup cache should always complete, don't set timeout
go p.CleanUpCache(context.TODO(), pod)
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// create
klog.V(5).Infof("pod targetPath not empty, need create pod:%s", pod.Name)
// check pod delete
for {
po, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil && po.DeletionTimestamp != nil {
klog.V(6).Infof("pod %s %s is being deleted, waiting", pod.Name, pod.Namespace)
time.Sleep(time.Millisecond * 500)
continue
}
if err != nil {
if apierrors.IsTimeout(err) {
break
}
if apierrors.IsNotFound(err) {
// umount mount point before recreate mount pod
err := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
exist, _ := mount.PathExists(sourcePath)
if !exist {
return fmt.Errorf("%s not exist", sourcePath)
}
return nil
})
if err == nil {
klog.Infof("start to umount: %s", sourcePath)
util.UmountPath(ctx, sourcePath)
}
// create pod
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: annotation,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
klog.Infof("Need to create pod %s %s", pod.Name, pod.Namespace)
_, err = p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("Create pod:%s err:%v", pod.Name, err)
}
return nil
}
klog.Errorf("Get pod err:%v", err)
return nil
}
// pod is created elsewhere
if po.Annotations == nil {
po.Annotations = make(map[string]string)
}
for k, v := range existTargets {
// add exist target in annotation
po.Annotations[k] = v
}
if err := util.ReplacePodAnnotation(ctx, p.Client, pod, po.Annotations); err != nil {
klog.Errorf("Update pod %s %s error: %v", po.Name, po.Namespace, err)
}
return err
}
err = fmt.Errorf("old pod %s %s deleting timeout", pod.Name, config.Namespace)
klog.V(5).Infof(err.Error())
return err
}
// podPendingHandler handles mount pod that is pending
func (p *PodDriver) podPendingHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
return nil
}
lock := config.GetPodLock(pod.Name)
lock.Lock()
defer lock.Unlock()
// check resource err
if util.IsPodResourceError(pod) {
klog.V(5).Infof("waitUtilMount: Pod %s failed because of resource.", pod.Name)
if util.IsPodHasResource(*pod) {
// if pod is failed because of resource, delete resource and deploy pod again.
_ = util.RemoveFinalizer(ctx, p.Client, pod, config.Finalizer)
klog.V(5).Infof("Delete it and deploy again with no resource.")
if err := p.Client.DeletePod(ctx, pod); err != nil {
klog.Errorf("delete po:%s err:%v", pod.Name, err)
return nil
}
isDeleted := false
// wait pod delete for 1min
for {
_, err := p.Client.GetPod(ctx, pod.Name, pod.Namespace)
if err == nil {
klog.V(6).Infof("pod %s %s still exists wait.", pod.Name, pod.Namespace)
time.Sleep(time.Microsecond * 500)
continue
}
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
if ctx.Err() == context.Canceled || ctx.Err() == context.DeadlineExceeded {
break
}
klog.Errorf("get mountPod err:%v", err)
}
if !isDeleted {
klog.Errorf("Old pod %s %s deleting timeout", pod.Name, config.Namespace)
return nil
}
var newPod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Labels: pod.Labels,
Annotations: pod.Annotations,
},
Spec: pod.Spec,
}
controllerutil.AddFinalizer(newPod, config.Finalizer)
util.DeleteResourceOfPod(newPod)
_, err := p.Client.CreatePod(ctx, newPod)
if err != nil {
klog.Errorf("create pod:%s err:%v", pod.Name, err)
}
} else {
klog.V(5).Infof("mountPod PodResourceError, but pod no resource, do nothing.")
}
}
return nil
}
// podReadyHandler handles mount pod that is ready
func (p *PodDriver) podReadyHandler(ctx context.Context, pod *corev1.Pod) error {
if pod == nil {
klog.Errorf("[podReadyHandler] get nil pod")
return nil
}
if pod.Annotations == nil {
return nil
}
// get mount point
mntPath, _, err := util.GetMountPathOfPod(*pod)
if err != nil {
klog.Error(err)
return nil
}
e := util.DoWithTimeout(ctx, defaultCheckoutTimeout, func() error {
_, e := os.Stat(mntPath)
return e
})
if e != nil {
klog.Errorf("[podReadyHandler] stat mntPath: %s err: %v, don't do recovery", mntPath, e)
return nil
}
// recovery for each target
for k, target := range pod.Annotations {
if k == util.GetReferenceKey(target) |
}
return nil
}
// recoverTarget recovers target path
func (p *PodDriver) recoverTarget(podName, sourcePath string, ti *targetItem, mi *mountItem) {
switch ti.status {
case targetStatusNotExist:
klog.Errorf("pod %s target %s not exists, item count:%d", podName, ti.target, ti.count)
if ti.count > 0 {
// the target still exists in the /proc/self/mountinfo file
// refer to this case: the local target exists, but the source it was bind-mounted to has been deleted
// if the target is a pod subpath (volumeMount.subPath), this would make the user pod deletion fail, so we help kubelet umount it
if mi.podDeleted {
p.umountTarget(ti.target, ti.count)
}
}
case targetStatusMounted:
// normal, the most likely case
klog.V(6).Infof("pod %s target %s is normal mounted", podName, ti.target)
case targetStatusNotMount:
klog.V(5).Infof("pod %s target %s is not mounted", podName, ti.target)
case targetStatusCorrupt:
if ti.inconsistent {
// the source paths (found in /proc/self/mountinfo) that the target is bound to are inconsistent
// something unexpected has happened
klog.Errorf("pod %s target %s, source inconsistent", podName, ti.target)
break
}
if mi.podDeleted {
klog.V(6).Infof("pod %s target %s, user pod has been deleted, don't do recovery", podName, ti.target)
break
}
// without umountTarget, the mountinfo file would grow without bound
// if we umounted all the target items, `mountPropagation` would stop taking effect
p.umountTarget(ti.target, ti.count-1)
if ti.subpath != "" {
sourcePath += "/" + ti.subpath
_, err := os.Stat(sourcePath)
if err != nil {
klog.Errorf("pod %s target %s, stat volPath:%s err:%v, don't do recovery", podName, ti.target, sourcePath, err)
break
}
}
klog.V(5).Infof("pod %s target %s recover volPath:%s", podName, ti.target, sourcePath)
mountOption := []string{"bind"}
if err := p.Mount(sourcePath, ti.target, "none", mountOption); err != nil {
klog.Errorf("exec cmd: mount -o bind %s %s err:%v", sourcePath, ti.target, err)
}
case targetStatusUnexpect:
klog.Errorf("pod %s target %s reslove err:%v", podName, ti.target, ti.err)
}
}
// umountTarget umounts the target path the given number of times
func (p *PodDriver) umountTarget(target string, count int) {
for i := 0; i < count; i++ {
// ignore error
p.Unmount(target)
}
}
// CleanUpCache cleans up the cache once the mount pod has been deleted
func (p *PodDriver) CleanUpCache(ctx context.Context, pod *corev1.Pod) {
if pod.Annotations[config.CleanCache] != "true" {
return
}
uuid := pod.Annotations[config.JuiceFSUUID]
uniqueId := pod.Annotations[config.UniqueId]
if uuid == "" && uniqueId == "" {
// no necessary info, return
klog.Errorf("[CleanUpCache] Can't get uuid and uniqueId from pod %s annotation. skip cache clean.", pod.Name)
return
}
// wait for pod deleted.
isDeleted := false
getCtx, getCancel := context.WithTimeout(ctx, 3*time.Minute)
defer getCancel()
for {
if _, err := p.Client.GetPod(getCtx, pod.Name, pod.Namespace); err != nil {
if apierrors.IsNotFound(err) {
isDeleted = true
break
}
if apierrors.IsTimeout(err) {
break
}
klog.V(5).Infof("[CleanUpCache] Get pod %s error %v. Skip clean cache.", pod.Name, err)
return
}
time.Sleep(time.Microsecond * 500)
}
if !isDeleted {
klog.Errorf("[CleanUpCache] Mount pod %s not deleted in 3 min. Skip clean cache.", pod.Name)
return
}
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s in node %s", uniqueId, config.NodeName)
podMnt := podmount.NewPodMount(p.Client, p.SafeFormatAndMount)
cacheDirs := []string{}
for _, dir := range pod.Spec.Volumes {
if strings.HasPrefix(dir.Name, "cachedir-") && dir.HostPath != nil {
cacheDirs = append(cacheDirs, dir.HostPath.Path)
}
}
image := pod.Spec.Containers[0].Image
if err := podMnt.CleanCache(ctx, image, uuid, uniqueId, cacheDirs); err != nil {
klog.V(5).Infof("[CleanUpCache] Cleanup cache of volume %s error %v", uniqueId, err)
}
}
| {
mi := p.mit.resolveTarget(target)
if mi == nil {
klog.Errorf("pod %s target %s resolve fail", pod.Name, target)
continue
}
p.recoverTarget(pod.Name, mntPath, mi.baseTarget, mi)
for _, ti := range mi.subPathTarget {
p.recoverTarget(pod.Name, mntPath, ti, mi)
}
} | conditional_block |
kitti_sem_data_loader.py | import logging
import os, sys
import numpy as np
import pykitti
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../")
import third_party.parseTrackletXML as parseTrackletXML
import kitti_detection_helper
import utils
import path_def
import se3
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", 'INFO'))
class KittiSemDataLoader():
def __init__(self, kitti_date, kitti_drive, end_index,
object_label_status):
self.kitti_dataset_path = path_def.kitti_dataset_path
self.kitti_date = kitti_date
self.kitti_drive = kitti_drive
self.start_index = 0
self.end_index = end_index
self.cache_path = path_def.kitti_cache_path
self.kitti_dir = self.kitti_date + '_' + self.kitti_drive
# for loading tracklets
self.drive_path = self.kitti_date + '_drive_' + self.kitti_drive + '_sync'
self.tracklet_xml_path = os.path.join(self.kitti_dataset_path, self.kitti_date,
self.drive_path, "tracklet_labels.xml")
self.corner_list = []
self.cuboid_list, self.volume_list = [], []
# key is frame number
self.local_volume_dict = {}
self.local_cuboid_dict = {}
self.local_yaw_dict = {}
self.local_hwl_dict = {}
self.poses_gt = []
self.gt_position = []
self.gt_orientation = []
# load KITTI dataset and extrinsics
self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
"""
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu
# first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu takes a vector from the IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
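# Illustrative note (not part of the original file): with the composed extrinsics,
# a homogeneous point given in the IMU frame can be mapped into cam0, e.g.
#   point_imu = np.array([0.0, 0.0, 0.0, 1.0])
#   point_cam0 = self.oTi @ point_imu
# while self.iTo maps points the opposite way, from cam0 back to the IMU frame.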
def generate_gt_bbox_path(self):
|
def generate_object_eval_path(self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
Load object tracklets and build their 3D bounding boxes.
The ground-truth trajectory is needed to place the boxes in the world frame.
Note: all tracklets of one object are averaged here, so the result is
inaccurate for moving objects.
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
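# Illustrative note (reading of the array above, not original text): each column is one
# of the 8 box corners in the object frame; the first four corners lie on the ground
# plane (z = 0) and the last four are shifted up by the object height h.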
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
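# Illustrative note (not original text): vTq is the object-to-velodyne SE(3) pose built
# from the yaw angle and the tracklet translation; with yaw = 0 it is a pure translation.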
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bboxes refer to the same object,
# so we compute an average bbox per object
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTo @ oTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerPosInCam2 = oTq[:3, :3] @ trackletBox + np.tile(oTq[:3, 3], (8, 1)).T
cornerPosInCam2 = np.eye(3) @ cornerPosInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerPosInCam2.T]
self.local_volume_dict[FN] = [object_3d.h * object_3d.w * object_3d.l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[object_3d.h, object_3d.w, object_3d.l]]
else:
self.local_cuboid_dict[FN].append(cornerPosInCam2.T)
self.local_volume_dict[FN].append(object_3d.h * object_3d.w * object_3d.l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([object_3d.h, object_3d.w, object_3d.l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# used for global IOU
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(object_3d.h * object_3d.w * object_3d.l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def plot_all_gt_bboxes(self, axis):
"""
draw gt bboxes from annotations
"""
for corner_sub in self.corner_list:
utils.draw_box(axis, corner_sub, axes=[0, 1, 2], color='blue')
if __name__ == "__main__":
# we can use this script to load the
# intrinsics and extrinsics from kitti
# kitti_date = '2011_09_26'
# kitti_drive = '0022'
kitti_date = '2011_10_03'
kitti_drive = '0027'
start_index = 0
end_index = 10
# refer to https://github.com/moshanATucsd/orcvio_cpp/blob/master/eval_results/kitti_eval/eval_info.md
# for which method to choose for each sequence
# object_label_status = 'tracklet_label'
# object_label_status = 'detection_label'
object_label_status = ''
DL = KittiSemDataLoader(kitti_date, kitti_drive, end_index, object_label_status)
print("intrinsics")
print(DL.K)
print("extrinsics")
print(DL.oTi)
| self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
os.makedirs(self.gt_bbox_results_path) | identifier_body |
kitti_sem_data_loader.py | import logging
import os, sys
import numpy as np
import pykitti
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../")
import third_party.parseTrackletXML as parseTrackletXML
import kitti_detection_helper
import utils
import path_def
import se3
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", 'INFO'))
class KittiSemDataLoader():
def __init__(self, kitti_date, kitti_drive, end_index,
object_label_status):
self.kitti_dataset_path = path_def.kitti_dataset_path
self.kitti_date = kitti_date
self.kitti_drive = kitti_drive
self.start_index = 0
self.end_index = end_index
self.cache_path = path_def.kitti_cache_path
self.kitti_dir = self.kitti_date + '_' + self.kitti_drive
# for loading tracklets
self.drive_path = self.kitti_date + '_drive_' + self.kitti_drive + '_sync'
self.tracklet_xml_path = os.path.join(self.kitti_dataset_path, self.kitti_date,
self.drive_path, "tracklet_labels.xml")
self.corner_list = []
self.cuboid_list, self.volume_list = [], []
# key is frame number
self.local_volume_dict = {}
self.local_cuboid_dict = {}
self.local_yaw_dict = {}
self.local_hwl_dict = {}
self.poses_gt = []
self.gt_position = []
self.gt_orientation = []
# load KITTI dataset and extrinsics
self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
"""
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu
# first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu takes a vector from the IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
os.makedirs(self.gt_bbox_results_path)
def | (self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
Load object tracklets and build their 3D bounding boxes.
The ground-truth trajectory is needed to place the boxes in the world frame.
Note: all tracklets of one object are averaged here, so the result is
inaccurate for moving objects.
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bboxes refer to the same object,
# so we compute an average bbox per object
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTo @ oTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerPosInCam2 = oTq[:3, :3] @ trackletBox + np.tile(oTq[:3, 3], (8, 1)).T
cornerPosInCam2 = np.eye(3) @ cornerPosInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerPosInCam2.T]
self.local_volume_dict[FN] = [object_3d.h * object_3d.w * object_3d.l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[object_3d.h, object_3d.w, object_3d.l]]
else:
self.local_cuboid_dict[FN].append(cornerPosInCam2.T)
self.local_volume_dict[FN].append(object_3d.h * object_3d.w * object_3d.l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([object_3d.h, object_3d.w, object_3d.l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# used for global IOU
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(object_3d.h * object_3d.w * object_3d.l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def plot_all_gt_bboxes(self, axis):
"""
draw gt bboxes from annotations
"""
for corner_sub in self.corner_list:
utils.draw_box(axis, corner_sub, axes=[0, 1, 2], color='blue')
if __name__ == "__main__":
# we can use this script to load the
# intrinsics and extrinsics from kitti
# kitti_date = '2011_09_26'
# kitti_drive = '0022'
kitti_date = '2011_10_03'
kitti_drive = '0027'
start_index = 0
end_index = 10
# refer to https://github.com/moshanATucsd/orcvio_cpp/blob/master/eval_results/kitti_eval/eval_info.md
# for which method to choose for each sequence
# object_label_status = 'tracklet_label'
# object_label_status = 'detection_label'
object_label_status = ''
DL = KittiSemDataLoader(kitti_date, kitti_drive, end_index, object_label_status)
print("intrinsics")
print(DL.K)
print("extrinsics")
print(DL.oTi)
| generate_object_eval_path | identifier_name |
kitti_sem_data_loader.py | import logging
import os, sys
import numpy as np
import pykitti
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../")
import third_party.parseTrackletXML as parseTrackletXML
import kitti_detection_helper
import utils
import path_def
import se3
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", 'INFO'))
class KittiSemDataLoader():
def __init__(self, kitti_date, kitti_drive, end_index,
object_label_status):
self.kitti_dataset_path = path_def.kitti_dataset_path
self.kitti_date = kitti_date
self.kitti_drive = kitti_drive
self.start_index = 0
self.end_index = end_index
self.cache_path = path_def.kitti_cache_path
self.kitti_dir = self.kitti_date + '_' + self.kitti_drive
# for loading tracklets
self.drive_path = self.kitti_date + '_drive_' + self.kitti_drive + '_sync'
self.tracklet_xml_path = os.path.join(self.kitti_dataset_path, self.kitti_date,
self.drive_path, "tracklet_labels.xml")
self.corner_list = []
self.cuboid_list, self.volume_list = [], []
# key is frame number
self.local_volume_dict = {}
self.local_cuboid_dict = {}
self.local_yaw_dict = {}
self.local_hwl_dict = {}
self.poses_gt = []
self.gt_position = []
self.gt_orientation = []
# load KITTI dataset and extrinsics
self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
""" | # first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu takes a vector from the IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
os.makedirs(self.gt_bbox_results_path)
def generate_object_eval_path(self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
Load object tracklets and build their 3D bounding boxes.
The ground-truth trajectory is needed to place the boxes in the world frame.
Note: all tracklets of one object are averaged here, so the result is
inaccurate for moving objects.
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bboxes refer to the same object,
# so we compute an average bbox per object
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTo @ oTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerPosInCam2 = oTq[:3, :3] @ trackletBox + np.tile(oTq[:3, 3], (8, 1)).T
cornerPosInCam2 = np.eye(3) @ cornerPosInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerPosInCam2.T]
self.local_volume_dict[FN] = [object_3d.h * object_3d.w * object_3d.l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[object_3d.h, object_3d.w, object_3d.l]]
else:
self.local_cuboid_dict[FN].append(cornerPosInCam2.T)
self.local_volume_dict[FN].append(object_3d.h * object_3d.w * object_3d.l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([object_3d.h, object_3d.w, object_3d.l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# used for global IOU
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(object_3d.h * object_3d.w * object_3d.l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def plot_all_gt_bboxes(self, axis):
"""
draw gt bboxes from annotations
"""
for corner_sub in self.corner_list:
utils.draw_box(axis, corner_sub, axes=[0, 1, 2], color='blue')
if __name__ == "__main__":
# we can use this script to load the
# intrinsics and extrinsics from kitti
# kitti_date = '2011_09_26'
# kitti_drive = '0022'
kitti_date = '2011_10_03'
kitti_drive = '0027'
start_index = 0
end_index = 10
# refer to https://github.com/moshanATucsd/orcvio_cpp/blob/master/eval_results/kitti_eval/eval_info.md
# for which method to choose for each sequence
# object_label_status = 'tracklet_label'
# object_label_status = 'detection_label'
object_label_status = ''
DL = KittiSemDataLoader(kitti_date, kitti_drive, end_index, object_label_status)
print("intrinsics")
print(DL.K)
print("extrinsics")
print(DL.oTi) |
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu | random_line_split |
kitti_sem_data_loader.py | import logging
import os, sys
import numpy as np
import pykitti
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../")
import third_party.parseTrackletXML as parseTrackletXML
import kitti_detection_helper
import utils
import path_def
import se3
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", 'INFO'))
class KittiSemDataLoader():
def __init__(self, kitti_date, kitti_drive, end_index,
object_label_status):
self.kitti_dataset_path = path_def.kitti_dataset_path
self.kitti_date = kitti_date
self.kitti_drive = kitti_drive
self.start_index = 0
self.end_index = end_index
self.cache_path = path_def.kitti_cache_path
self.kitti_dir = self.kitti_date + '_' + self.kitti_drive
# for loading tracklets
self.drive_path = self.kitti_date + '_drive_' + self.kitti_drive + '_sync'
self.tracklet_xml_path = os.path.join(self.kitti_dataset_path, self.kitti_date,
self.drive_path, "tracklet_labels.xml")
self.corner_list = []
self.cuboid_list, self.volume_list = [], []
# key is frame number
self.local_volume_dict = {}
self.local_cuboid_dict = {}
self.local_yaw_dict = {}
self.local_hwl_dict = {}
self.poses_gt = []
self.gt_position = []
self.gt_orientation = []
# load KITTI dataset and extrinsics
self.get_dataset()
self.K = self.dataset.calib.K_cam0
self.load_extrinsics()
self.get_GroundTruth()
if object_label_status == 'tracklet_label':
self.load_tracklet()
elif object_label_status == 'detection_label':
self.load_detection()
else:
return
# generate path to store groundtruth 3D bounding box
self.generate_gt_bbox_path()
# generate path to store the object 3D IOU results
self.generate_object_eval_path()
def get_GroundTruth(self):
"""
load gt position and orientation
"""
# set first pose to identity
# first_pose = self.dataset.oxts[0].T_w_imu
# first_pose_inv = src.se3.inversePose(first_pose)
# do not correct the orientation
# first_pose_inv[:3, :3] = np.eye(3)
# do not set first pose to identity
first_pose_inv = np.eye(4)
for o in self.dataset.oxts:
normalized_pose_original = first_pose_inv @ o.T_w_imu
self.poses_gt.append(normalized_pose_original)
# gt pose is from I to G
for i, pose in enumerate(self.poses_gt):
# get gt position
gt_position = np.reshape(pose[0:3, 3], (-1, 1))
self.gt_position.append(gt_position)
# get gt orientation
R_wIMU = pose[0:3, 0:3]
self.gt_orientation.append(R_wIMU)
def get_dataset(self):
"""
load kitti dataset using pykitti
"""
self.dataset = pykitti.raw(self.kitti_dataset_path, self.kitti_date, self.kitti_drive, frames = range(self.start_index, self.end_index, 1))
LOGGER.info('Drive: ' + str(self.dataset.drive))
LOGGER.info('Frame range: ' + str(self.dataset.frames))
def load_extrinsics(self):
# cam to imu T
T_camvelo = self.dataset.calib.T_cam0_velo
T_veloimu = self.dataset.calib.T_velo_imu
# T_cam0_imu takes a vector from the IMU frame to the cam0 frame.
# refer to https://github.com/utiasSTARS/pykitti
# point_velo = np.array([0,0,0,1])
# point_cam0 = data.calib.T_cam0_velo.dot(point_velo)
T_cam0_imu = np.matmul(T_camvelo, T_veloimu)
self.oTi = T_cam0_imu
self.iTo = se3.inversePose(self.oTi)
# add vel to imu transformation
self.iTv = se3.inversePose(self.dataset.calib.T_velo_imu)
self.o2Tv = self.dataset.calib.T_cam2_velo
def generate_gt_bbox_path(self):
self.gt_bbox_results_path = self.cache_path + self.kitti_dir + '/gt_bboxes_results/'
if not os.path.exists(self.gt_bbox_results_path):
|
def generate_object_eval_path(self):
self.pr_table_dir = self.cache_path + self.kitti_dir + '/evaluation/'
if not os.path.exists(self.pr_table_dir):
os.makedirs(self.pr_table_dir)
def load_tracklet(self):
"""
Load object tracklets and build their 3D bounding boxes.
The ground-truth trajectory is needed to place the boxes in the world frame.
Note: all tracklets of one object are averaged here, so the result is
inaccurate for moving objects.
"""
tracklet_all = parseTrackletXML.parseXML(self.tracklet_xml_path)
for i, tracklet in enumerate(tracklet_all):
h, w, l = tracklet.size
if tracklet.objectType not in ["Car", "Van", "Truck"]:
continue
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
[0, 0, 0, 0, h, h, h, h]])
corner_sublist = []
for translation, rotation, state, occlusion, truncation, amtOcclusion, \
amtBorders, absoluteFrameNumber in tracklet:
# determine if object is in the image; otherwise continue
if truncation not in (parseTrackletXML.TRUNC_IN_IMAGE, parseTrackletXML.TRUNC_TRUNCATED):
continue
# print("translation {}".format(translation))
# re-create 3D bounding box in velodyne coordinate system
yaw = rotation[2] # other rotations are supposedly 0
assert np.abs(rotation[:2]).sum() == 0, 'object rotations other than yaw given!'
# transform from camera frame to world frame
FN = absoluteFrameNumber
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
# object to velodyne transform
vTq = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, translation[0]],
[np.sin(yaw), np.cos(yaw), 0.0, translation[1]],
[0.0, 0.0, 1.0, translation[2]],
[0.0, 0.0, 0.0, 1.0]])
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTv @ vTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerInVelo = vTq[:3, :3] @ trackletBox + np.tile(vTq[:3, 3], (8, 1)).T
cornerInCam2 = self.o2Tv @ np.vstack((cornerInVelo, np.ones((1, 8))))
cornerInCam2 = np.eye(3) @ cornerInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerInCam2.T]
self.local_volume_dict[FN] = [h * w * l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[h, w, l]]
else:
self.local_cuboid_dict[FN].append(cornerInCam2.T)
self.local_volume_dict[FN].append(h * w * l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([h, w, l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# for global cuboids
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(h * w * l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def load_detection(self):
"""
load object bounding box labels in detection benchmark
"""
root_dir = self.kitti_dataset_path + 'object/'
kitti_det_loader = kitti_detection_helper.KittiDataset(root_dir,
self.kitti_date, self.kitti_drive)
type_list = ['Car', 'Van', 'Truck']
# some of the bboxes refer to the same object,
# so we compute an average bbox per object
for id, object_3d_list in enumerate(kitti_det_loader.all_object_3d):
for object_3d in object_3d_list:
corner_sublist = []
if object_3d.cls_type not in type_list:
continue
trackletBox, oTq, yaw = object_3d.generate_corners3d()
FN = kitti_det_loader.img_idx_list[id]
# only load bbox between start and end frame
if FN >= self.end_index:
# print("FN {} end {}".format(FN, self.end_index))
continue
wTi = np.eye(4)
wRi = self.gt_orientation[FN]
# note q is from G to I
wTi[:3, :3] = wRi
wTi[:3, 3] = np.squeeze(self.gt_position[FN])
wTq = wTi @ self.iTo @ oTq
# force only yaw and x,y translation
wTq = utils.poseSE32SE2(wTq)
cornerPosInVelo = wTq[:3, :3] @ trackletBox + np.tile(wTq[:3, 3], (8, 1)).T
corner_sublist.append(cornerPosInVelo)
cornerPosInCam2 = oTq[:3, :3] @ trackletBox + np.tile(oTq[:3, 3], (8, 1)).T
cornerPosInCam2 = np.eye(3) @ cornerPosInCam2[:3, :]
# used for per frame IOU evaluation
if FN not in self.local_cuboid_dict.keys():
self.local_cuboid_dict[FN] = [cornerPosInCam2.T]
self.local_volume_dict[FN] = [object_3d.h * object_3d.w * object_3d.l]
self.local_yaw_dict[FN] = [yaw]
self.local_hwl_dict[FN] = [[object_3d.h, object_3d.w, object_3d.l]]
else:
self.local_cuboid_dict[FN].append(cornerPosInCam2.T)
self.local_volume_dict[FN].append(object_3d.h * object_3d.w * object_3d.l)
self.local_yaw_dict[FN].append(yaw)
self.local_hwl_dict[FN].append([object_3d.h, object_3d.w, object_3d.l])
if len(corner_sublist) > 0:
# for plotting
corner_sublist = np.concatenate([corner_sublist], axis=0)
corner_sub = np.mean(corner_sublist, axis=0)
self.corner_list.append(corner_sub)
# for 3D IOU eval
# used for global IOU
self.cuboid_list.append(np.mean(np.array(corner_sublist), axis=0).T)
self.volume_list.append(object_3d.h * object_3d.w * object_3d.l)
self.cuboid_list = np.array(self.cuboid_list)
self.volume_list = np.array(self.volume_list)
def plot_all_gt_bboxes(self, axis):
"""
draw gt bboxes from annotations
"""
for corner_sub in self.corner_list:
utils.draw_box(axis, corner_sub, axes=[0, 1, 2], color='blue')
if __name__ == "__main__":
# we can use this script to load the
# intrinsics and extrinsics from kitti
# kitti_date = '2011_09_26'
# kitti_drive = '0022'
kitti_date = '2011_10_03'
kitti_drive = '0027'
start_index = 0
end_index = 10
# refer to https://github.com/moshanATucsd/orcvio_cpp/blob/master/eval_results/kitti_eval/eval_info.md
# for which method to choose for each sequence
# object_label_status = 'tracklet_label'
# object_label_status = 'detection_label'
object_label_status = ''
DL = KittiSemDataLoader(kitti_date, kitti_drive, end_index, object_label_status)
print("intrinsics")
print(DL.K)
print("extrinsics")
print(DL.oTi)
| os.makedirs(self.gt_bbox_results_path) | conditional_block |
vspheremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// AddMachineControllerToManager adds the machine controller to the provided
// manager.
func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager) error {
var (
controlledType = &infrav1.VSphereMachine{}
controlledTypeName = reflect.TypeOf(controlledType).Elem().Name()
controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName)
controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName))
controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort)
)
// Build the controller context.
controllerContext := &context.ControllerContext{
ControllerManagerContext: ctx,
Name: controllerNameShort,
Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)),
Logger: ctx.Logger.WithName(controllerNameShort),
}
return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
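// Illustrative summary (not from the original source): the poll below exits once the
// remote copy is gone, once its ResourceVersion differs from the locally cached one,
// or once the remote and local Status are already equal; otherwise it retries every second.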
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj references the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
}
// Requeue the operation until the VM is "notfound".
if vm.State != infrav1.VirtualMachineStateNotFound {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
// The VM is deleted so remove the finalizer.
ctx.VSphereMachine.Finalizers = clusterutilv1.Filter(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNormal(ctx *context.MachineContext) (reconcile.Result, error) |
func (r machineReconciler) reconcileNetwork(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) (bool, error) {
expNetCount, actNetCount := len(ctx.VSphereMachine.Spec.Network.Devices), len(vm.Network)
if expNetCount != actNetCount {
return false, errors.Errorf("invalid network count for %q: exp=%d act=%d", ctx, expNetCount, actNetCount)
}
ctx.VSphereMachine.Status.Network = vm.Network
// If the VM is powered on then issue requeues until all of the VM's
// networks have IP addresses.
var ipAddrs []corev1.NodeAddress
for _, netStatus := range ctx.VSphereMachine.Status.Network {
for _, ip := range netStatus.IPAddrs {
ipAddrs = append(ipAddrs, corev1.NodeAddress{
Type: corev1.NodeInternalIP,
Address: ip,
})
}
}
if len(ipAddrs) == 0 {
ctx.Logger.Info("waiting on IP addresses")
return false, nil
}
// Use the collected IP addresses to assign the Machine's addresses.
ctx.VSphereMachine.Status.Addresses = ipAddrs
return true, nil
}
func (r machineReconciler) reconcileProviderID(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) error {
providerID := infrautilv1.ConvertUUIDToProviderID(vm.BiosUUID)
if providerID == "" {
return errors.Errorf("invalid BIOS UUID %s for %s", vm.BiosUUID, ctx)
}
if ctx.VSphereMachine.Spec.ProviderID == nil || *ctx.VSphereMachine.Spec.ProviderID != providerID {
ctx.VSphereMachine.Spec.ProviderID = &providerID
ctx.Logger.Info("updated provider ID", "provider-id", providerID)
}
return nil
}
| {
// If the VSphereMachine is in an error state, return early.
if ctx.VSphereMachine.Status.ErrorReason != nil || ctx.VSphereMachine.Status.ErrorMessage != nil {
ctx.Logger.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the VSphereMachine doesn't have our finalizer, add it.
if !clusterutilv1.Contains(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer) {
ctx.VSphereMachine.Finalizers = append(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
}
if !ctx.Cluster.Status.InfrastructureReady {
ctx.Logger.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if ctx.Machine.Spec.Bootstrap.DataSecretName == nil {
ctx.Logger.Info("Waiting for bootstrap data to be available")
return reconcile.Result{}, nil
}
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
// Get or create the VM.
vm, err := vmService.ReconcileVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile VM")
}
if vm.State != infrav1.VirtualMachineStateReady {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateReady, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
if ok, err := r.reconcileNetwork(ctx, vm, vmService); !ok {
if err != nil {
return reconcile.Result{}, err
}
ctx.Logger.Info("waiting on vm networking")
return reconcile.Result{}, nil
}
if err := r.reconcileProviderID(ctx, vm, vmService); err != nil {
return reconcile.Result{}, err
}
// Once the provider ID is set then the VSphereMachine is InfrastructureReady
ctx.VSphereMachine.Status.Ready = true
ctx.Logger.Info("VSphereMachine is infrastructure-ready")
return reconcile.Result{}, nil
} | identifier_body |
vspheremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// AddMachineControllerToManager adds the machine controller to the provided
// manager.
func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager) error {
var (
controlledType = &infrav1.VSphereMachine{}
controlledTypeName = reflect.TypeOf(controlledType).Elem().Name()
controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName)
controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName))
controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort)
)
// Build the controller context.
controllerContext := &context.ControllerContext{
ControllerManagerContext: ctx,
Name: controllerNameShort,
Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)),
Logger: ctx.Logger.WithName(controllerNameShort),
}
return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj references the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil |
// Requeue the operation until the VM is "notfound".
if vm.State != infrav1.VirtualMachineStateNotFound {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
// The VM is deleted so remove the finalizer.
ctx.VSphereMachine.Finalizers = clusterutilv1.Filter(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNormal(ctx *context.MachineContext) (reconcile.Result, error) {
// If the VSphereMachine is in an error state, return early.
if ctx.VSphereMachine.Status.ErrorReason != nil || ctx.VSphereMachine.Status.ErrorMessage != nil {
ctx.Logger.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the VSphereMachine doesn't have our finalizer, add it.
if !clusterutilv1.Contains(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer) {
ctx.VSphereMachine.Finalizers = append(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
}
if !ctx.Cluster.Status.InfrastructureReady {
ctx.Logger.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if ctx.Machine.Spec.Bootstrap.DataSecretName == nil {
ctx.Logger.Info("Waiting for bootstrap data to be available")
return reconcile.Result{}, nil
}
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
// Get or create the VM.
vm, err := vmService.ReconcileVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile VM")
}
if vm.State != infrav1.VirtualMachineStateReady {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateReady, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
if ok, err := r.reconcileNetwork(ctx, vm, vmService); !ok {
if err != nil {
return reconcile.Result{}, err
}
ctx.Logger.Info("waiting on vm networking")
return reconcile.Result{}, nil
}
if err := r.reconcileProviderID(ctx, vm, vmService); err != nil {
return reconcile.Result{}, err
}
// Once the provider ID is set then the VSphereMachine is InfrastructureReady
ctx.VSphereMachine.Status.Ready = true
ctx.Logger.Info("VSphereMachine is infrastructure-ready")
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNetwork(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) (bool, error) {
expNetCount, actNetCount := len(ctx.VSphereMachine.Spec.Network.Devices), len(vm.Network)
if expNetCount != actNetCount {
return false, errors.Errorf("invalid network count for %q: exp=%d act=%d", ctx, expNetCount, actNetCount)
}
ctx.VSphereMachine.Status.Network = vm.Network
// If the VM is powered on then issue requeues until all of the VM's
// networks have IP addresses.
var ipAddrs []corev1.NodeAddress
for _, netStatus := range ctx.VSphereMachine.Status.Network {
for _, ip := range netStatus.IPAddrs {
ipAddrs = append(ipAddrs, corev1.NodeAddress{
Type: corev1.NodeInternalIP,
Address: ip,
})
}
}
if len(ipAddrs) == 0 {
ctx.Logger.Info("waiting on IP addresses")
return false, nil
}
// Use the collected IP addresses to assign the Machine's addresses.
ctx.VSphereMachine.Status.Addresses = ipAddrs
return true, nil
}
func (r machineReconciler) reconcileProviderID(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) error {
providerID := infrautilv1.ConvertUUIDToProviderID(vm.BiosUUID)
if providerID == "" {
return errors.Errorf("invalid BIOS UUID %s for %s", vm.BiosUUID, ctx)
}
if ctx.VSphereMachine.Spec.ProviderID == nil || *ctx.VSphereMachine.Spec.ProviderID != providerID {
ctx.VSphereMachine.Spec.ProviderID = &providerID
ctx.Logger.Info("updated provider ID", "provider-id", providerID)
}
return nil
}
| {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
} | conditional_block |
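The row above ends with its fim_type marker (conditional_block), following the column layout shown at the top of this dump: file_name, prefix, suffix, middle, fim_type. Purely as an illustrative sketch, and assuming the usual fill-in-the-middle convention that the middle column is the span cut out between prefix and suffix, such a row could be reassembled into the original source like this; reassemble_fim_row and the miniature example_row are hypothetical names made up for the illustration.

def reassemble_fim_row(row):
    """Rebuild the original source text of one FIM row.

    Assumes the columns shown in this dump: 'prefix', 'middle' and 'suffix'
    hold the three text spans, while 'file_name' and 'fim_type' are metadata.
    """
    return row["prefix"] + row["middle"] + row["suffix"]


# Hypothetical usage with a hand-made miniature row; the real rows hold
# much larger prefix/suffix spans (whole files).
example_row = {
    "file_name": "vspheremachine_controller.go",
    "prefix": "vm, err := vmService.DestroyVM(ctx)\nif err != nil ",
    "middle": "{\n\treturn reconcile.Result{}, errors.Wrapf(err, \"failed to destroy VM\")\n}",
    "suffix": "\n// Requeue the operation until the VM is \"notfound\".\n",
    "fim_type": "conditional_block",
}
print(reassemble_fim_row(example_row))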
vspheremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/google/go-cmp/cmp" | corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// AddMachineControllerToManager adds the machine controller to the provided
// manager.
func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager) error {
var (
controlledType = &infrav1.VSphereMachine{}
controlledTypeName = reflect.TypeOf(controlledType).Elem().Name()
controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName)
controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName))
controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort)
)
// Build the controller context.
controllerContext := &context.ControllerContext{
ControllerManagerContext: ctx,
Name: controllerNameShort,
Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)),
Logger: ctx.Logger.WithName(controllerNameShort),
}
return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj references the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
}
// Requeue the operation until the VM is "notfound".
if vm.State != infrav1.VirtualMachineStateNotFound {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
// The VM is deleted so remove the finalizer.
ctx.VSphereMachine.Finalizers = clusterutilv1.Filter(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNormal(ctx *context.MachineContext) (reconcile.Result, error) {
// If the VSphereMachine is in an error state, return early.
if ctx.VSphereMachine.Status.ErrorReason != nil || ctx.VSphereMachine.Status.ErrorMessage != nil {
ctx.Logger.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the VSphereMachine doesn't have our finalizer, add it.
if !clusterutilv1.Contains(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer) {
ctx.VSphereMachine.Finalizers = append(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
}
if !ctx.Cluster.Status.InfrastructureReady {
ctx.Logger.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if ctx.Machine.Spec.Bootstrap.DataSecretName == nil {
ctx.Logger.Info("Waiting for bootstrap data to be available")
return reconcile.Result{}, nil
}
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
// Get or create the VM.
vm, err := vmService.ReconcileVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile VM")
}
if vm.State != infrav1.VirtualMachineStateReady {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateReady, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
if ok, err := r.reconcileNetwork(ctx, vm, vmService); !ok {
if err != nil {
return reconcile.Result{}, err
}
ctx.Logger.Info("waiting on vm networking")
return reconcile.Result{}, nil
}
if err := r.reconcileProviderID(ctx, vm, vmService); err != nil {
return reconcile.Result{}, err
}
// Once the provider ID is set then the VSphereMachine is InfrastructureReady
ctx.VSphereMachine.Status.Ready = true
ctx.Logger.Info("VSphereMachine is infrastructure-ready")
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNetwork(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) (bool, error) {
expNetCount, actNetCount := len(ctx.VSphereMachine.Spec.Network.Devices), len(vm.Network)
if expNetCount != actNetCount {
return false, errors.Errorf("invalid network count for %q: exp=%d act=%d", ctx, expNetCount, actNetCount)
}
ctx.VSphereMachine.Status.Network = vm.Network
// If the VM is powered on then issue requeues until all of the VM's
// networks have IP addresses.
var ipAddrs []corev1.NodeAddress
for _, netStatus := range ctx.VSphereMachine.Status.Network {
for _, ip := range netStatus.IPAddrs {
ipAddrs = append(ipAddrs, corev1.NodeAddress{
Type: corev1.NodeInternalIP,
Address: ip,
})
}
}
if len(ipAddrs) == 0 {
ctx.Logger.Info("waiting on IP addresses")
return false, nil
}
// Use the collected IP addresses to assign the Machine's addresses.
ctx.VSphereMachine.Status.Addresses = ipAddrs
return true, nil
}
func (r machineReconciler) reconcileProviderID(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) error {
providerID := infrautilv1.ConvertUUIDToProviderID(vm.BiosUUID)
if providerID == "" {
return errors.Errorf("invalid BIOS UUID %s for %s", vm.BiosUUID, ctx)
}
if ctx.VSphereMachine.Spec.ProviderID == nil || *ctx.VSphereMachine.Spec.ProviderID != providerID {
ctx.VSphereMachine.Spec.ProviderID = &providerID
ctx.Logger.Info("updated provider ID", "provider-id", providerID)
}
return nil
} | "github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors" | random_line_split |
vspheremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/record"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/session"
infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
)
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// AddMachineControllerToManager adds the machine controller to the provided
// manager.
func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager) error {
var (
controlledType = &infrav1.VSphereMachine{}
controlledTypeName = reflect.TypeOf(controlledType).Elem().Name()
controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName)
controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName))
controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort)
)
// Build the controller context.
controllerContext := &context.ControllerContext{
ControllerManagerContext: ctx,
Name: controllerNameShort,
Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)),
Logger: ctx.Logger.WithName(controllerNameShort),
}
return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
For(controlledType).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: clusterutilv1.MachineToInfrastructureMapFunc(controlledTypeGVK),
},
).
// Watch a GenericEvent channel for the controlled resource.
//
// This is useful when there are events outside of Kubernetes that
// should cause a resource to be synchronized, such as a goroutine
// waiting on some asynchronous, external task to complete.
Watches(
&source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)},
&handler.EnqueueRequestForObject{},
).
Complete(machineReconciler{ControllerContext: controllerContext})
}
type machineReconciler struct {
*context.ControllerContext
}
// Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
func (r machineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
// Get the VSphereMachine resource for this request.
vsphereMachine := &infrav1.VSphereMachine{}
if err := r.Client.Get(r, req.NamespacedName, vsphereMachine); err != nil {
if apierrors.IsNotFound(err) {
r.Logger.Info("VSphereMachine not found, won't reconcile", "key", req.NamespacedName)
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Machine.
machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, vsphereMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
r.Logger.Info("Waiting for Machine Controller to set OwnerRef on VSphereMachine")
return reconcile.Result{}, nil
}
// Fetch the CAPI Cluster.
cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta)
if err != nil {
r.Logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
// Fetch the VSphereCluster
vsphereCluster := &infrav1.VSphereCluster{}
vsphereClusterName := client.ObjectKey{
Namespace: vsphereMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(r, vsphereClusterName, vsphereCluster); err != nil {
r.Logger.Info("Waiting for VSphereCluster")
return reconcile.Result{}, nil
}
// Get or create an authenticated session to the vSphere endpoint.
authSession, err := session.GetOrCreate(r.Context,
vsphereCluster.Spec.Server, vsphereMachine.Spec.Datacenter,
r.ControllerManagerContext.Username, r.ControllerManagerContext.Password)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to create vSphere session")
}
// Create the patch helper.
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
return reconcile.Result{}, errors.Wrapf(
err,
"failed to init patch helper for %s %s/%s",
vsphereMachine.GroupVersionKind(),
vsphereMachine.Namespace,
vsphereMachine.Name)
}
// Create the machine context for this request.
machineContext := &context.MachineContext{
ClusterContext: &context.ClusterContext{
ControllerContext: r.ControllerContext,
Cluster: cluster,
VSphereCluster: vsphereCluster,
},
Machine: machine,
VSphereMachine: vsphereMachine,
Session: authSession,
Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
PatchHelper: patchHelper,
}
// Print the task-ref upon entry and upon exit.
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnEntry",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
defer func() {
machineContext.Logger.V(4).Info(
"VSphereMachine.Status.TaskRef OnExit",
"task-ref", machineContext.VSphereMachine.Status.TaskRef)
}()
// Always issue a patch when exiting this function so changes to the
// resource are patched back to the API server.
defer func() {
// Patch the VSphereMachine resource.
if err := machineContext.Patch(); err != nil {
if reterr == nil {
reterr = err
}
machineContext.Logger.Error(err, "patch failed", "machine", machineContext.String())
}
// localObj is a deep copy of the VSphereMachine resource that was
// fetched at the top of this Reconcile function.
localObj := machineContext.VSphereMachine.DeepCopy()
// Fetch the up-to-date VSphereMachine resource into remoteObj until the
// fetched resource has a different ResourceVersion than the local
// object.
//
// FYI - resource versions are opaque, numeric strings and should not
// be compared with < or >, only for equality -
// https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions.
//
// Since CAPV is currently deployed with a single replica, and this
// controller has a max concurrency of one, the only agent updating the
// VSphereMachine resource should be this controller.
//
// So if the remote resource's ResourceVersion is different than the
// ResourceVersion of the resource fetched at the beginning of this
// reconcile request, then that means the remote resource should be
// newer than the local resource.
wait.PollImmediateInfinite(time.Second*1, func() (bool, error) {
// remoteObj references the same VSphereMachine resource as it exists
// on the API server post the patch operation above. In a perfect world,
// the Status for localObj and remoteObj should be the same.
remoteObj := &infrav1.VSphereMachine{}
if err := machineContext.Client.Get(machineContext, req.NamespacedName, remoteObj); err != nil {
if apierrors.IsNotFound(err) {
// It's possible that the remote resource cannot be found
// because it has been removed. Do not error, just exit.
return true, nil
}
// There was an issue getting the remote resource. Sleep for a
// second and try again.
machineContext.Logger.Error(err, "failed to get VSphereMachine while exiting reconcile")
return false, nil
}
// If the remote resource version is not the same as the local
// resource version, then it means we were able to get a resource
// newer than the one we already had.
if localObj.ResourceVersion != remoteObj.ResourceVersion {
machineContext.Logger.Info(
"resource is patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// If the resources are the same resource version, then a previous
// patch may not have resulted in any changes. Check to see if the
// remote status is the same as the local status.
if cmp.Equal(localObj.Status, remoteObj.Status, cmpopts.EquateEmpty()) {
machineContext.Logger.Info(
"resource patch was not required",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return true, nil
}
// The remote resource version is the same as the local resource
// version, which means the local cache is not yet up-to-date.
machineContext.Logger.Info(
"resource is not patched",
"local-resource-version", localObj.ResourceVersion,
"remote-resource-version", remoteObj.ResourceVersion)
return false, nil
})
}()
// Handle deleted machines
if !vsphereMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineContext)
}
// Handle non-deleted machines
return r.reconcileNormal(machineContext)
}
func (r machineReconciler) reconcileDelete(ctx *context.MachineContext) (reconcile.Result, error) {
ctx.Logger.Info("Handling deleted VSphereMachine")
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
vm, err := vmService.DestroyVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM")
}
// Requeue the operation until the VM is "notfound".
if vm.State != infrav1.VirtualMachineStateNotFound {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
// The VM is deleted so remove the finalizer.
ctx.VSphereMachine.Finalizers = clusterutilv1.Filter(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r machineReconciler) | (ctx *context.MachineContext) (reconcile.Result, error) {
// If the VSphereMachine is in an error state, return early.
if ctx.VSphereMachine.Status.ErrorReason != nil || ctx.VSphereMachine.Status.ErrorMessage != nil {
ctx.Logger.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the VSphereMachine doesn't have our finalizer, add it.
if !clusterutilv1.Contains(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer) {
ctx.VSphereMachine.Finalizers = append(ctx.VSphereMachine.Finalizers, infrav1.MachineFinalizer)
}
if !ctx.Cluster.Status.InfrastructureReady {
ctx.Logger.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if ctx.Machine.Spec.Bootstrap.DataSecretName == nil {
ctx.Logger.Info("Waiting for bootstrap data to be available")
return reconcile.Result{}, nil
}
// TODO(akutz) Implement selection of VM service based on vSphere version
var vmService services.VirtualMachineService = &govmomi.VMService{}
// Get or create the VM.
vm, err := vmService.ReconcileVM(ctx)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile VM")
}
if vm.State != infrav1.VirtualMachineStateReady {
ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateReady, "actual-vm-state", vm.State)
return reconcile.Result{}, nil
}
if ok, err := r.reconcileNetwork(ctx, vm, vmService); !ok {
if err != nil {
return reconcile.Result{}, err
}
ctx.Logger.Info("waiting on vm networking")
return reconcile.Result{}, nil
}
if err := r.reconcileProviderID(ctx, vm, vmService); err != nil {
return reconcile.Result{}, err
}
// Once the provider ID is set then the VSphereMachine is InfrastructureReady
ctx.VSphereMachine.Status.Ready = true
ctx.Logger.Info("VSphereMachine is infrastructure-ready")
return reconcile.Result{}, nil
}
func (r machineReconciler) reconcileNetwork(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) (bool, error) {
expNetCount, actNetCount := len(ctx.VSphereMachine.Spec.Network.Devices), len(vm.Network)
if expNetCount != actNetCount {
return false, errors.Errorf("invalid network count for %q: exp=%d act=%d", ctx, expNetCount, actNetCount)
}
ctx.VSphereMachine.Status.Network = vm.Network
// If the VM is powered on then issue requeues until all of the VM's
// networks have IP addresses.
var ipAddrs []corev1.NodeAddress
for _, netStatus := range ctx.VSphereMachine.Status.Network {
for _, ip := range netStatus.IPAddrs {
ipAddrs = append(ipAddrs, corev1.NodeAddress{
Type: corev1.NodeInternalIP,
Address: ip,
})
}
}
if len(ipAddrs) == 0 {
ctx.Logger.Info("waiting on IP addresses")
return false, nil
}
// Use the collected IP addresses to assign the Machine's addresses.
ctx.VSphereMachine.Status.Addresses = ipAddrs
return true, nil
}
func (r machineReconciler) reconcileProviderID(ctx *context.MachineContext, vm infrav1.VirtualMachine, vmService services.VirtualMachineService) error {
providerID := infrautilv1.ConvertUUIDToProviderID(vm.BiosUUID)
if providerID == "" {
return errors.Errorf("invalid BIOS UUID %s for %s", vm.BiosUUID, ctx)
}
if ctx.VSphereMachine.Spec.ProviderID == nil || *ctx.VSphereMachine.Spec.ProviderID != providerID {
ctx.VSphereMachine.Spec.ProviderID = &providerID
ctx.Logger.Info("updated provider ID", "provider-id", providerID)
}
return nil
}
| reconcileNormal | identifier_name |
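The deferred block in Reconcile above waits, via wait.PollImmediateInfinite with a one-second interval, until the locally cached VSphereMachine reflects the patch that was just written. Purely as an illustrative sketch of that poll-until-true pattern, and not the controller's actual implementation (which is the Go code above), the same idea in Python could look like the following; poll_until and its parameters are hypothetical names for the example, and the timeout is added only so the sketch cannot spin forever.

import time


def poll_until(condition, interval_seconds=1.0, timeout_seconds=None):
    """Call condition() immediately, then once per interval, until it is True.

    Mirrors the shape of wait.PollImmediateInfinite used above; returns False
    only if the optional timeout expires first.
    """
    deadline = None if timeout_seconds is None else time.monotonic() + timeout_seconds
    while True:
        if condition():
            return True
        if deadline is not None and time.monotonic() >= deadline:
            return False
        time.sleep(interval_seconds)


# Hypothetical usage: poll until a "remote" resource version differs from the
# local one, similar to the ResourceVersion comparison in the Go code above.
local_version = "100"
remote_versions = iter(["100", "100", "101"])
print(poll_until(lambda: next(remote_versions) != local_version, interval_seconds=0.01))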
scrape.py | from bs4 import BeautifulSoup
import csv
import copy
from collections import defaultdict
from datetime import date, timedelta
import json
import os
import re
import requests
import sys
import time
district_map = {
"TVM": "Thiruvananthapuram",
"KLM": "Kollam",
"PTA": "Pathanamthitta",
"IDK": "Idukki",
"KTM": "Kottayam",
"ALP": "Alappuzha",
"EKM": "Ernakulam",
"TSR": "Thrissur",
"PKD": "Palakkad",
"MPM": "Malappuram",
"KKD": "Kozhikode",
"WYD": "Wayanad",
"KNR": "Kannur",
"KGD": "Kasaragod",
"TOTAL": "Total",
}
district_code_map = {
"1601": "KGD",
"1602": "KNR",
"1603": "WYD",
"1604": "KKD",
"1605": "MPM",
"1606": "PKD",
"1607": "TSR",
"1608": "EKM",
"1609": "IDK",
"1610": "KTM",
"1611": "ALP",
"1612": "PTA",
"1613": "KLM",
"1614": "TVM",
}
QUARANTINE_HEADER = [
"district",
"observation",
"total_hospitalized",
"isolation",
"hospitalized_today",
]
CASE_HEADER = ["confirmed", "recovered", "active", "deaths"]
ACTIVE_HEADER = ["date"]
ACTIVE_HEADER.extend(CASE_HEADER)
ACTIVE_TODAY_HEADER = ["district"]
ACTIVE_TODAY_HEADER.extend(CASE_HEADER)
CSV_HEADER = copy.deepcopy(QUARANTINE_HEADER)
CSV_HEADER.extend(CASE_HEADER)
TESTING_HEADER = [
"date",
"total_sent",
"sent_on_date",
"processed_in_one_day",
"total_positive",
"new_positive",
]
TEST_DATA_JSON = "./testData.json"
DATA_INDEX_JSON = "./dataIndexJSON.json"
DATA_INDEX_JS = "./dataIndex.js"
INIT_URL = "https://dashboard.kerala.gov.in"
QAR_REQ_URL = "https://dashboard.kerala.gov.in/quarantined-datewise.php"
ACTIVE_REQ_URL = (
"https://dashboard.kerala.gov.in/dailyreporting-view-public-districtwise.php"
)
TESTING_REQ_URL = "https://dashboard.kerala.gov.in/testing-view-public.php"
DATE_REQ_STRING = "rep_date3"
HEADERS = {
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Origin": "https://dashboard.kerala.gov.in",
"Referer": "https://dashboard.kerala.gov.in/quarantined-datewise.php",
}
def get_date(datearg):
dsplit = [int(i) for i in re.split(r"[-/]\s*", datearg)]
return date(day=dsplit[0], month=dsplit[1], year=dsplit[2])
def getint(string):
try:
return int(string)
except (TypeError, ValueError):
return int("".join(filter(str.isdigit, string)))
def csv_writer(filepath, data):
with open(filepath, "w") as f:
w = csv.DictWriter(f, fieldnames=set(CSV_HEADER))
w.writeheader()
w.writerows(data)
def init_sess():
try:
# warm up the cookies
sess = requests.Session()
sess.get(INIT_URL, headers=HEADERS)
except requests.exceptions.RequestException as e:
print(e)
else:
return sess
def run_req(sess, url, data, headers, method="GET"):
kwargs = {}
if method == "GET":
req_func = sess.get
elif method == "POST":
req_func = sess.post
kwargs["data"] = data
kwargs["headers"] = headers
try:
response = req_func(url, **kwargs)
except requests.exceptions.RequestException as e:
print(e)
else:
return response
def get_quarantine_details(date):
sess = init_sess()
date_str = date.strftime("%d/%m/%Y")
payload = {DATE_REQ_STRING: date_str, "lw": "View"}
csv_data = []
response = run_req(sess, QAR_REQ_URL, payload, HEADERS, "POST")
if not response:
return
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not received")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# ensure there are 14 district entries plus the total entry
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), " Actual response not received")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def | ():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not received")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
# redundant code for updating testing details on dataIndex for every run
print("updating testing in dataIndex")
for d, v in testing_data.items():
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
testing_entry = di_data["daily_bulletin"][datestr]
except KeyError:
testing_entry = {}
testing_day = v
testing_entry["sample_sent"] = getint(testing_day["total_sent"])
testing_entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
di_data["daily_bulletin"][datestr] = testing_entry
print("updating other details in dataIndex")
for d in date_list:
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
entry = di_data["daily_bulletin"][datestr]
except KeyError:
entry = {}
total_day = totals_data[d]
try:
testing_day = testing_data[d]
except KeyError:
print("data not available for {}".format(d))
available = False
prevday = d
while not available:
prevday -= timedelta(days=1)
print("trying for {}".format(prevday))
# if data isn't available for current day, show previous day data
try:
testing_day = testing_data[prevday]
except KeyError:
print("not available for {}".format(prevday))
else:
available = True
kd = kerala_data[d]
entry["total_active"] = getint(total_day["active"])
entry["total_positive"] = getint(total_day["confirmed"])
entry["deaths"] = getint(total_day["deaths"])
entry["positive_today"] = getint(kd["confirmed"])
if "sample_sent" not in entry:
entry["sample_sent"] = getint(testing_day["total_sent"])
entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
entry["total_passengers"] = 0
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
entry["file"] = filename
di_data["daily_bulletin"][datestr] = entry
with open(DATA_INDEX_JSON, "w") as json_file:
json.dump(di_data, json_file)
print("Wrote index data to: {}".format(DATA_INDEX_JSON))
with open(DATA_INDEX_JS, "w") as js_file:
write_str = "var dataIndex = " + str(di_data)
js_file.write(write_str)
print("Wrote index data to: {}".format(DATA_INDEX_JS))
def get_data_for_date(dates=[], get_only_curr=False):
kd, active_today, updated_date = get_active_details_today()
if not get_only_curr:
active_data = get_bulk_active_details()
else:
assert dates[0] == updated_date, "Date mismatch. Site updated till {}".format(
updated_date
)
active_data = active_today
time.sleep(2)
testing_data = get_testing_details()
time.sleep(3)
active_data_pivot = active_detail_pivot(active_data, get_only_curr)
totals_data = {}
for d in dates:
qar_data = get_quarantine_details(d)
active = active_data_pivot[d]
if not active:
print("Active details empty for date: {}".format(d))
continue
csv_data = []
for dat in qar_data:
dist_code = dat.pop("dist_code")
dist_active = active[dist_code]
for key in CASE_HEADER:
dat[key] = dist_active[key]
# take active count from kerala data
if dist_code == "TOTAL":
dat["active"] = kd[d]["active"]
csv_data.append(dat)
# set total data
totals_data[d] = active["TOTAL"]
totals_data[d]["active"] = kd[d]["active"]
# write to csv
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
filepath = os.path.join("./data/", filename)
print("writing csv for {}".format(d))
csv_writer(filepath, csv_data)
time.sleep(2)
edit_data_index(dates, totals_data, testing_data, kd)
if __name__ == "__main__":
if len(sys.argv) > 1:
dateargs = sys.argv[1:]
print("processing for these dates: {}".format(dateargs))
dates = [get_date(datearg) for datearg in dateargs]
get_only_curr = False
else:
dates = [date.today()]
get_only_curr = True
get_data_for_date(dates, get_only_curr)
| get_testing_details | identifier_name |
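Every scraper in the row above (get_quarantine_details, extract_datewise_active, get_testing_details) follows the same BeautifulSoup pattern: pick a table, walk its rows, and zip the cell texts against a header list. Below is a minimal self-contained sketch of that pattern on made-up HTML; the snippet and header names are hypothetical, not the dashboard's real markup.

from bs4 import BeautifulSoup

# Hypothetical HTML standing in for one of the dashboard tables.
html = """
<table>
  <tr><th>district</th><th>confirmed</th></tr>
  <tr><td>TVM</td><td>12</td></tr>
  <tr><td>KLM</td><td>7</td></tr>
</table>
"""

HEADER = ["district", "confirmed"]

soup = BeautifulSoup(html, "html.parser")
table = soup.find_all("table")[0]

rows = []
for tr in table.find_all("tr")[1:]:  # skip the header row
    cols = [td.text for td in tr.find_all("td")]
    rows.append(dict(zip(HEADER, cols)))

print(rows)  # [{'district': 'TVM', 'confirmed': '12'}, {'district': 'KLM', 'confirmed': '7'}]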
scrape.py | from bs4 import BeautifulSoup
import csv
import copy
from collections import defaultdict
from datetime import date, timedelta
import json
import os
import re
import requests
import sys
import time
district_map = {
"TVM": "Thiruvananthapuram",
"KLM": "Kollam",
"PTA": "Pathanamthitta",
"IDK": "Idukki",
"KTM": "Kottayam",
"ALP": "Alappuzha",
"EKM": "Ernakulam",
"TSR": "Thrissur",
"PKD": "Palakkad",
"MPM": "Malappuram",
"KKD": "Kozhikode",
"WYD": "Wayanad",
"KNR": "Kannur",
"KGD": "Kasaragod",
"TOTAL": "Total",
}
district_code_map = {
"1601": "KGD",
"1602": "KNR",
"1603": "WYD",
"1604": "KKD",
"1605": "MPM",
"1606": "PKD",
"1607": "TSR",
"1608": "EKM",
"1609": "IDK",
"1610": "KTM",
"1611": "ALP",
"1612": "PTA",
"1613": "KLM",
"1614": "TVM",
}
QUARANTINE_HEADER = [
"district",
"observation",
"total_hospitalized",
"isolation",
"hospitalized_today",
]
CASE_HEADER = ["confirmed", "recovered", "active", "deaths"]
ACTIVE_HEADER = ["date"]
ACTIVE_HEADER.extend(CASE_HEADER)
ACTIVE_TODAY_HEADER = ["district"]
ACTIVE_TODAY_HEADER.extend(CASE_HEADER)
CSV_HEADER = copy.deepcopy(QUARANTINE_HEADER)
CSV_HEADER.extend(CASE_HEADER)
TESTING_HEADER = [
"date",
"total_sent",
"sent_on_date",
"processed_in_one_day",
"total_positive",
"new_positive",
]
TEST_DATA_JSON = "./testData.json"
DATA_INDEX_JSON = "./dataIndexJSON.json"
DATA_INDEX_JS = "./dataIndex.js"
INIT_URL = "https://dashboard.kerala.gov.in"
QAR_REQ_URL = "https://dashboard.kerala.gov.in/quarantined-datewise.php"
ACTIVE_REQ_URL = (
"https://dashboard.kerala.gov.in/dailyreporting-view-public-districtwise.php"
)
TESTING_REQ_URL = "https://dashboard.kerala.gov.in/testing-view-public.php"
DATE_REQ_STRING = "rep_date3"
HEADERS = {
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Origin": "https://dashboard.kerala.gov.in",
"Referer": "https://dashboard.kerala.gov.in/quarantined-datewise.php",
}
def get_date(datearg):
dsplit = [int(i) for i in re.split(r"[-/]\s*", datearg)]
return date(day=dsplit[0], month=dsplit[1], year=dsplit[2])
def getint(string):
try:
return int(string)
except (TypeError, ValueError):
return int("".join(filter(str.isdigit, string)))
def csv_writer(filepath, data):
with open(filepath, "w") as f:
w = csv.DictWriter(f, fieldnames=set(CSV_HEADER))
w.writeheader()
w.writerows(data)
def init_sess():
try:
# warm up the cookies
sess = requests.Session()
sess.get(INIT_URL, headers=HEADERS)
except requests.exceptions.RequestException as e:
print(e)
else:
return sess
def run_req(sess, url, data, headers, method="GET"):
kwargs = {}
if method == "GET":
req_func = sess.get
elif method == "POST":
req_func = sess.post
kwargs["data"] = data
kwargs["headers"] = headers
try:
response = req_func(url, **kwargs)
except requests.exceptions.RequestException as e:
print(e)
else:
return response
def get_quarantine_details(date):
sess = init_sess()
date_str = date.strftime("%d/%m/%Y")
payload = {DATE_REQ_STRING: date_str, "lw": "View"}
csv_data = []
response = run_req(sess, QAR_REQ_URL, payload, HEADERS, "POST")
if not response:
return
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not received")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# ensure there are 14 district entries plus the total entry
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), " Actual response not received")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def get_testing_details():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), " Actual response not received")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
# redundant code for updating testing details on dataIndex for every run
print("updating testing in dataIndex")
for d, v in testing_data.items():
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
testing_entry = di_data["daily_bulletin"][datestr]
except KeyError:
testing_entry = {}
testing_day = v
testing_entry["sample_sent"] = getint(testing_day["total_sent"])
testing_entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
di_data["daily_bulletin"][datestr] = testing_entry
print("updating other details in dataIndex")
for d in date_list:
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
entry = di_data["daily_bulletin"][datestr]
except KeyError:
entry = {}
total_day = totals_data[d]
try:
testing_day = testing_data[d]
except KeyError:
print("data not available for {}".format(d))
available = False
prevday = d
while not available:
prevday -= timedelta(days=1)
print("trying for {}".format(prevday))
# if data isn't available for current day, show previous day data
try:
testing_day = testing_data[prevday]
except KeyError:
print("not available for {}".format(prevday))
else:
available = True
kd = kerala_data[d]
entry["total_active"] = getint(total_day["active"])
entry["total_positive"] = getint(total_day["confirmed"])
entry["deaths"] = getint(total_day["deaths"])
entry["positive_today"] = getint(kd["confirmed"])
if "sample_sent" not in entry:
entry["sample_sent"] = getint(testing_day["total_sent"])
entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
entry["total_passengers"] = 0
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
entry["file"] = filename
di_data["daily_bulletin"][datestr] = entry
with open(DATA_INDEX_JSON, "w") as json_file:
json.dump(di_data, json_file)
print("Wrote index data to: {}".format(DATA_INDEX_JSON))
with open(DATA_INDEX_JS, "w") as js_file:
write_str = "var dataIndex = " + str(di_data)
js_file.write(write_str)
print("Wrote index data to: {}".format(DATA_INDEX_JS))
def get_data_for_date(dates=[], get_only_curr=False):
kd, active_today, updated_date = get_active_details_today()
if not get_only_curr:
active_data = get_bulk_active_details()
else:
assert dates[0] == updated_date, "Date mismatch. Site updated till {}".format(
updated_date
)
active_data = active_today
time.sleep(2)
testing_data = get_testing_details()
time.sleep(3)
active_data_pivot = active_detail_pivot(active_data, get_only_curr)
totals_data = {}
for d in dates:
qar_data = get_quarantine_details(d) | if not active:
print("Active details empty for date: {}".format(d))
continue
csv_data = []
for dat in qar_data:
dist_code = dat.pop("dist_code")
dist_active = active[dist_code]
for key in CASE_HEADER:
dat[key] = dist_active[key]
# take active count from kerala data
if dist_code == "TOTAL":
dat["active"] = kd[d]["active"]
csv_data.append(dat)
# set total data
totals_data[d] = active["TOTAL"]
totals_data[d]["active"] = kd[d]["active"]
# write to csv
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
filepath = os.path.join("./data/", filename)
print("writing csv for {}".format(d))
csv_writer(filepath, csv_data)
time.sleep(2)
edit_data_index(dates, totals_data, testing_data, kd)
if __name__ == "__main__":
if len(sys.argv) > 1:
dateargs = sys.argv[1:]
print("processing for these dates: {}".format(dateargs))
dates = [get_date(datearg) for datearg in dateargs]
get_only_curr = False
else:
dates = [date.today()]
get_only_curr = True
get_data_for_date(dates, get_only_curr) | active = active_data_pivot[d] | random_line_split |
scrape.py | from bs4 import BeautifulSoup
import csv
import copy
from collections import defaultdict
from datetime import date, timedelta
import json
import os
import re
import requests
import sys
import time
district_map = {
"TVM": "Thiruvananthapuram",
"KLM": "Kollam",
"PTA": "Pathanamthitta",
"IDK": "Idukki",
"KTM": "Kottayam",
"ALP": "Alappuzha",
"EKM": "Ernakulam",
"TSR": "Thrissur",
"PKD": "Palakkad",
"MPM": "Malappuram",
"KKD": "Kozhikode",
"WYD": "Wayanad",
"KNR": "Kannur",
"KGD": "Kasaragod",
"TOTAL": "Total",
}
district_code_map = {
"1601": "KGD",
"1602": "KNR",
"1603": "WYD",
"1604": "KKD",
"1605": "MPM",
"1606": "PKD",
"1607": "TSR",
"1608": "EKM",
"1609": "IDK",
"1610": "KTM",
"1611": "ALP",
"1612": "PTA",
"1613": "KLM",
"1614": "TVM",
}
QUARANTINE_HEADER = [
"district",
"observation",
"total_hospitalized",
"isolation",
"hospitalized_today",
]
CASE_HEADER = ["confirmed", "recovered", "active", "deaths"]
ACTIVE_HEADER = ["date"]
ACTIVE_HEADER.extend(CASE_HEADER)
ACTIVE_TODAY_HEADER = ["district"]
ACTIVE_TODAY_HEADER.extend(CASE_HEADER)
CSV_HEADER = copy.deepcopy(QUARANTINE_HEADER)
CSV_HEADER.extend(CASE_HEADER)
TESTING_HEADER = [
"date",
"total_sent",
"sent_on_date",
"processed_in_one_day",
"total_positive",
"new_positive",
]
TEST_DATA_JSON = "./testData.json"
DATA_INDEX_JSON = "./dataIndexJSON.json"
DATA_INDEX_JS = "./dataIndex.js"
INIT_URL = "https://dashboard.kerala.gov.in"
QAR_REQ_URL = "https://dashboard.kerala.gov.in/quarantined-datewise.php"
ACTIVE_REQ_URL = (
"https://dashboard.kerala.gov.in/dailyreporting-view-public-districtwise.php"
)
TESTING_REQ_URL = "https://dashboard.kerala.gov.in/testing-view-public.php"
DATE_REQ_STRING = "rep_date3"
HEADERS = {
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Origin": "https://dashboard.kerala.gov.in",
"Referer": "https://dashboard.kerala.gov.in/quarantined-datewise.php",
}
def get_date(datearg):
dsplit = [int(i) for i in re.split(r"[-/]\s*", datearg)]
return date(day=dsplit[0], month=dsplit[1], year=dsplit[2])
def getint(string):
try:
return int(string)
except (TypeError, ValueError):
return int("".join(filter(str.isdigit, string)))
def csv_writer(filepath, data):
with open(filepath, "w") as f:
w = csv.DictWriter(f, fieldnames=set(CSV_HEADER))
w.writeheader()
w.writerows(data)
def init_sess():
try:
# warm up the cookies
sess = requests.Session()
sess.get(INIT_URL, headers=HEADERS)
except requests.exceptions.RequestException as e:
print(e)
else:
return sess
def run_req(sess, url, data, headers, method="GET"):
kwargs = {}
if method == "GET":
req_func = sess.get
elif method == "POST":
req_func = sess.post
kwargs["data"] = data
kwargs["headers"] = headers
try:
response = req_func(url, **kwargs)
except requests.exceptions.RequestException as e:
print(e)
else:
return response
def get_quarantine_details(date):
sess = init_sess()
date_str = date.strftime("%d/%m/%Y")
payload = {DATE_REQ_STRING: date_str, "lw": "View"}
csv_data = []
response = run_req(sess, QAR_REQ_URL, payload, HEADERS, "POST")
if not response:
return
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), "Actual response not received")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# expect 14 district entries plus the total entry
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), "Actual response not received")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def get_testing_details():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), "Actual response not received")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
# redundant code for updating testing details on dataIndex for every run
print("updating testing in dataIndex")
for d, v in testing_data.items():
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
testing_entry = di_data["daily_bulletin"][datestr]
except KeyError:
testing_entry = {}
testing_day = v
testing_entry["sample_sent"] = getint(testing_day["total_sent"])
testing_entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
di_data["daily_bulletin"][datestr] = testing_entry
print("updating other details in dataIndex")
for d in date_list:
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
entry = di_data["daily_bulletin"][datestr]
except KeyError:
entry = {}
total_day = totals_data[d]
try:
testing_day = testing_data[d]
except KeyError:
print("data not available for {}".format(d))
available = False
prevday = d
while not available:
prevday -= timedelta(days=1)
print("trying for {}".format(prevday))
# if data isn't available for current day, show previous day data
try:
testing_day = testing_data[prevday]
except KeyError:
print("not available for {}".format(prevday))
else:
available = True
kd = kerala_data[d]
entry["total_active"] = getint(total_day["active"])
entry["total_positive"] = getint(total_day["confirmed"])
entry["deaths"] = getint(total_day["deaths"])
entry["positive_today"] = getint(kd["confirmed"])
if "sample_sent" not in entry:
entry["sample_sent"] = getint(testing_day["total_sent"])
entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
entry["total_passengers"] = 0
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
entry["file"] = filename
di_data["daily_bulletin"][datestr] = entry
with open(DATA_INDEX_JSON, "w") as json_file:
json.dump(di_data, json_file)
print("Wrote index data to: {}".format(DATA_INDEX_JSON))
with open(DATA_INDEX_JS, "w") as js_file:
write_str = "var dataIndex = " + str(di_data)
js_file.write(write_str)
print("Wrote index data to: {}".format(DATA_INDEX_JS))
def get_data_for_date(dates=[], get_only_curr=False):
|
if __name__ == "__main__":
if len(sys.argv) > 1:
dateargs = sys.argv[1:]
print("processing for these dates: {}".format(dateargs))
dates = [get_date(datearg) for datearg in dateargs]
get_only_curr = False
else:
dates = [date.today()]
get_only_curr = True
get_data_for_date(dates, get_only_curr)
| kd, active_today, updated_date = get_active_details_today()
if not get_only_curr:
active_data = get_bulk_active_details()
else:
assert dates[0] == updated_date, "Date mismatch. Site updated till {}".format(
updated_date
)
active_data = active_today
time.sleep(2)
testing_data = get_testing_details()
time.sleep(3)
active_data_pivot = active_detail_pivot(active_data, get_only_curr)
totals_data = {}
for d in dates:
qar_data = get_quarantine_details(d)
active = active_data_pivot[d]
if not active:
print("Active details empty for date: {}".format(d))
continue
csv_data = []
for dat in qar_data:
dist_code = dat.pop("dist_code")
dist_active = active[dist_code]
for key in CASE_HEADER:
dat[key] = dist_active[key]
# take active count from kerala data
if dist_code == "TOTAL":
dat["active"] = kd[d]["active"]
csv_data.append(dat)
# set total data
totals_data[d] = active["TOTAL"]
totals_data[d]["active"] = kd[d]["active"]
# write to csv
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
filepath = os.path.join("./data/", filename)
print("writing csv for {}".format(d))
csv_writer(filepath, csv_data)
time.sleep(2)
edit_data_index(dates, totals_data, testing_data, kd) | identifier_body |
scrape.py | from bs4 import BeautifulSoup
import csv
import copy
from collections import defaultdict
from datetime import date, timedelta
import json
import os
import re
import requests
import sys
import time
district_map = {
"TVM": "Thiruvananthapuram",
"KLM": "Kollam",
"PTA": "Pathanamthitta",
"IDK": "Idukki",
"KTM": "Kottayam",
"ALP": "Alappuzha",
"EKM": "Ernakulam",
"TSR": "Thrissur",
"PKD": "Palakkad",
"MPM": "Malappuram",
"KKD": "Kozhikode",
"WYD": "Wayanad",
"KNR": "Kannur",
"KGD": "Kasaragod",
"TOTAL": "Total",
}
district_code_map = {
"1601": "KGD",
"1602": "KNR",
"1603": "WYD",
"1604": "KKD",
"1605": "MPM",
"1606": "PKD",
"1607": "TSR",
"1608": "EKM",
"1609": "IDK",
"1610": "KTM",
"1611": "ALP",
"1612": "PTA",
"1613": "KLM",
"1614": "TVM",
}
QUARANTINE_HEADER = [
"district",
"observation",
"total_hospitalized",
"isolation",
"hospitalized_today",
]
CASE_HEADER = ["confirmed", "recovered", "active", "deaths"]
ACTIVE_HEADER = ["date"]
ACTIVE_HEADER.extend(CASE_HEADER)
ACTIVE_TODAY_HEADER = ["district"]
ACTIVE_TODAY_HEADER.extend(CASE_HEADER)
CSV_HEADER = copy.deepcopy(QUARANTINE_HEADER)
CSV_HEADER.extend(CASE_HEADER)
TESTING_HEADER = [
"date",
"total_sent",
"sent_on_date",
"processed_in_one_day",
"total_positive",
"new_positive",
]
TEST_DATA_JSON = "./testData.json"
DATA_INDEX_JSON = "./dataIndexJSON.json"
DATA_INDEX_JS = "./dataIndex.js"
INIT_URL = "https://dashboard.kerala.gov.in"
QAR_REQ_URL = "https://dashboard.kerala.gov.in/quarantined-datewise.php"
ACTIVE_REQ_URL = (
"https://dashboard.kerala.gov.in/dailyreporting-view-public-districtwise.php"
)
TESTING_REQ_URL = "https://dashboard.kerala.gov.in/testing-view-public.php"
DATE_REQ_STRING = "rep_date3"
HEADERS = {
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36",
"Origin": "https://dashboard.kerala.gov.in",
"Referer": "https://dashboard.kerala.gov.in/quarantined-datewise.php",
}
def get_date(datearg):
dsplit = [int(i) for i in re.split(r"[-/]\s*", datearg)]
return date(day=dsplit[0], month=dsplit[1], year=dsplit[2])
def getint(string):
try:
return int(string)
except (TypeError, ValueError):
return int("".join(filter(str.isdigit, string)))
def csv_writer(filepath, data):
with open(filepath, "w") as f:
w = csv.DictWriter(f, fieldnames=set(CSV_HEADER))
w.writeheader()
w.writerows(data)
def init_sess():
try:
# warm up the cookies
sess = requests.Session()
sess.get(INIT_URL, headers=HEADERS)
except requests.exceptions.RequestException as e:
print(e)
else:
return sess
def run_req(sess, url, data, headers, method="GET"):
kwargs = {}
if method == "GET":
req_func = sess.get
elif method == "POST":
req_func = sess.post
kwargs["data"] = data
kwargs["headers"] = headers
try:
response = req_func(url, **kwargs)
except requests.exceptions.RequestException as e:
print(e)
else:
return response
def get_quarantine_details(date):
sess = init_sess()
date_str = date.strftime("%d/%m/%Y")
payload = {DATE_REQ_STRING: date_str, "lw": "View"}
csv_data = []
response = run_req(sess, QAR_REQ_URL, payload, HEADERS, "POST")
if not response:
return
soup = BeautifulSoup(response.content, "html.parser")
table = soup.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
district = cols[0]
data_dict = dict(zip(QUARANTINE_HEADER, cols))
data_dict["dist_code"] = district
data_dict["district"] = district_map[district]
csv_data.append(data_dict)
return csv_data
def extract_datewise_active(soup, district):
datalist = []
# assumes last table is the datewise table
table = soup.find_all("table")[-1]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_HEADER, cols))
data_dict["date"] = get_date(data_dict["date"])
data_dict["district"] = district
datalist.append(data_dict)
return datalist
def get_active_details_today():
active_data = defaultdict(list)
sess = init_sess()
response = run_req(sess, ACTIVE_REQ_URL, None, HEADERS, "GET")
if not response:
print("response failed for today's active case details")
return
if len(response.content) < 1000:
print(len(response.content), "Actual response not received")
return
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
last_date = soup.find_all(text=re.compile("updated", re.I))[0].split()[1]
last_date = get_date(last_date)
# assumes second last table is the summary table
table = soup.find_all("table")[-2]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(ACTIVE_TODAY_HEADER, cols))
data_dict["date"] = last_date
district = data_dict["district"]
active_data[district].append(data_dict)
# expect 14 district entries plus the total entry
assert len(active_data) == 15
# get kerala datewise details table as well
kerala_data = {i["date"]: i for i in extract_datewise_active(soup, "KERALA")}
return (kerala_data, active_data, last_date)
def get_bulk_active_details():
active_data = {}
time.sleep(5)
for code, district in district_code_map.items():
# new session for each district data
sess = init_sess()
print("Processing: {}".format(district))
payload = {"district": code}
response = run_req(sess, ACTIVE_REQ_URL, payload, HEADERS, "POST")
if not response:
print("response failed for district {}".format(district))
break
if len(response.content) < 1000:
print(len(response.content), "Actual response not received")
break
soup = BeautifulSoup(response.content, "html.parser")
region = soup.find_all(text=re.compile("you have chosen", re.I))[0]
print("Got data for {}".format(region.next_sibling.text))
# assumes last table is the datewise table
active_data[district] = extract_datewise_active(soup, district)
time.sleep(5)
return active_data
def get_testing_details():
testing_data = {}
sess = init_sess()
response = run_req(sess, TESTING_REQ_URL, None, HEADERS, "GET")
if not response:
|
if len(response.content) < 1000:
print(len(response.content), "Actual response not received")
return
print("Processing testing data")
soup = BeautifulSoup(response.content, "html.parser")
case_report = soup.find_all(text=re.compile("Daily Case Reports from", re.I))[0]
table = case_report.parent.parent.parent.find_all("table")[0]
for row in table.find_all("tr")[1:]:
cols = [i.text for i in row.find_all("td")]
data_dict = dict(zip(TESTING_HEADER, cols))
test_date = get_date(data_dict["date"])
data_dict["date"] = test_date
testing_data[test_date] = data_dict
return testing_data
def active_detail_pivot(active_data, get_only_curr=False):
pivot_data = defaultdict(dict)
for district, values in active_data.items():
count = {"district": district, "confirmed": 0, "deaths": 0, "recovered": 0}
for value in values:
v_date = value["date"]
for key in ["confirmed", "deaths", "recovered"]:
count[key] += getint(value[key])
count["active"] = getint(value["active"])
pivot_data[v_date][district] = copy.deepcopy(count)
# set total if get_only_curr is False
if not get_only_curr:
for d, dist_dict in pivot_data.items():
total = {
"confirmed": 0,
"deaths": 0,
"recovered": 0,
"active": 0,
}
for district, value in dist_dict.items():
for key in total.keys():
total[key] += getint(value[key])
total["district"] = "TOTAL"
dist_dict["TOTAL"] = total
return pivot_data
def edit_data_index(date_list, totals_data, testing_data, kerala_data):
di_data = None
# write test_data into testdatajson
with open(TEST_DATA_JSON, "w") as test_json:
td = {}
for d, v in testing_data.items():
strdate = "{}-{}-{}".format(d.day, d.month, d.year)
td[strdate] = {key: val for key, val in v.items() if key != "date"}
json.dump(td, test_json)
print("Wrote testing data to: {}".format(TEST_DATA_JSON))
with open(DATA_INDEX_JSON, "r") as json_file:
di_data = json.load(json_file)
# redundant code for updating testing details on dataIndex for every run
print("updating testing in dataIndex")
for d, v in testing_data.items():
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
testing_entry = di_data["daily_bulletin"][datestr]
except KeyError:
testing_entry = {}
testing_day = v
testing_entry["sample_sent"] = getint(testing_day["total_sent"])
testing_entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
di_data["daily_bulletin"][datestr] = testing_entry
print("updating other details in dataIndex")
for d in date_list:
datestr = "{}-{}-{}".format(d.day, d.month, d.year)
try:
entry = di_data["daily_bulletin"][datestr]
except KeyError:
entry = {}
total_day = totals_data[d]
try:
testing_day = testing_data[d]
except KeyError:
print("data not available for {}".format(d))
available = False
prevday = d
while not available:
prevday -= timedelta(days=1)
print("trying for {}".format(prevday))
# if data isn't available for current day, show previous day data
try:
testing_day = testing_data[prevday]
except KeyError:
print("not available for {}".format(prevday))
else:
available = True
kd = kerala_data[d]
entry["total_active"] = getint(total_day["active"])
entry["total_positive"] = getint(total_day["confirmed"])
entry["deaths"] = getint(total_day["deaths"])
entry["positive_today"] = getint(kd["confirmed"])
if "sample_sent" not in entry:
entry["sample_sent"] = getint(testing_day["total_sent"])
entry["sample_sent_today"] = getint(testing_day["sent_on_date"])
entry["total_passengers"] = 0
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
entry["file"] = filename
di_data["daily_bulletin"][datestr] = entry
with open(DATA_INDEX_JSON, "w") as json_file:
json.dump(di_data, json_file)
print("Wrote index data to: {}".format(DATA_INDEX_JSON))
with open(DATA_INDEX_JS, "w") as js_file:
write_str = "var dataIndex = " + str(di_data)
js_file.write(write_str)
print("Wrote index data to: {}".format(DATA_INDEX_JS))
def get_data_for_date(dates=[], get_only_curr=False):
kd, active_today, updated_date = get_active_details_today()
if not get_only_curr:
active_data = get_bulk_active_details()
else:
assert dates[0] == updated_date, "Date mismatch. Site updated till {}".format(
updated_date
)
active_data = active_today
time.sleep(2)
testing_data = get_testing_details()
time.sleep(3)
active_data_pivot = active_detail_pivot(active_data, get_only_curr)
totals_data = {}
for d in dates:
qar_data = get_quarantine_details(d)
active = active_data_pivot[d]
if not active:
print("Active details empty for date: {}".format(d))
continue
csv_data = []
for dat in qar_data:
dist_code = dat.pop("dist_code")
dist_active = active[dist_code]
for key in CASE_HEADER:
dat[key] = dist_active[key]
# take active count from kerala data
if dist_code == "TOTAL":
dat["active"] = kd[d]["active"]
csv_data.append(dat)
# set total data
totals_data[d] = active["TOTAL"]
totals_data[d]["active"] = kd[d]["active"]
# write to csv
filename = "data_{}_{}_{}.csv".format(d.day, d.month, d.year)
filepath = os.path.join("./data/", filename)
print("writing csv for {}".format(d))
csv_writer(filepath, csv_data)
time.sleep(2)
edit_data_index(dates, totals_data, testing_data, kd)
if __name__ == "__main__":
if len(sys.argv) > 1:
dateargs = sys.argv[1:]
print("processing for these dates: {}".format(dateargs))
dates = [get_date(datearg) for datearg in dateargs]
get_only_curr = False
else:
dates = [date.today()]
get_only_curr = True
get_data_for_date(dates, get_only_curr)
| print("response failed for today's active case details")
return | conditional_block |
vck.py | import Tkinter
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
#import whrandom
import random
import string
import pickle
import sys
import os
class bitmap:
"""A two-dimensional one-bit-deep bitmap suitable for VCK operations.
The coordinate system has 0,0 at NW and xmax, ymax at SE. The external
representation of the pixels, accessed via get() and set(), has white
paper as 0 and black ink as 1."""
# Protected static constants for the internal representation of the
# pixel colours. The internal representation for pixels is that of
# PIL's "1" format, where 0x00 is black and 0xff is white. No other
# pixel values are allowed. Yeah, this gives a conversion for every
# pixel access: so sue me! I just found it too confusing to have paper
# as 1 and ink as 0, especially when it came to doing the boolean ops
# on the images.
_pixelBlack = 0x00
_pixelWhite = 0xff
# Private members:
# __image = the image
# __draw = the PIL gadget you use to write on the image
def __init__(self, arg1, arg2=None):
"""The allowed forms for the constructor are:
1- vck.bitmap("image.tif")
...i.e. from a file name;
2- vck.bitmap((x,y))
...ie from a 2-tuple with the size; picture will be all white."""
self.__image = None
self.__draw = None
if type(arg1) == type(""):
# form 1
raw = Image.open(arg1)
self.__image = raw.convert("1")
elif type(arg1) == type((1,2)):
if arg2 == None:
# form 2
self.__image = Image.new("1", arg1, bitmap._pixelWhite)
if not self.__image:
raise TypeError, "Give me EITHER a filename OR a " \
"(width, height) size pair."
self.__draw = ImageDraw.ImageDraw(self.__image)
def set(self, x, y, colour=1):
"""Set the pixel at x, y to be of colour colour (default 1 = black
ink). Any colour value other than 0 (white paper) is taken to be 1
(black ink)."""
inkCol = None
if colour == 0:
# self.__draw.setink(bitmap._pixelWhite)
inkCol = bitmap._pixelWhite
else:
# self.__draw.setink(bitmap._pixelBlack)
inkCol = bitmap._pixelBlack
self.__draw.point((x, y), fill=inkCol)
def get(self, x, y):
"""Return the value of the pixel at x, y"""
return not self.__image.getpixel((x, y))
def size(self):
"""Return a 2-tuple (width, height) in pixels."""
return self.__image.size
def view(self, root, title="No name"):
"""Display this image in a toplevel window (optionally with the
given title). Precondition: Tk must have been initialised (the
caller must supply Tk's root window). Return the toplevel window,
which the caller must hold on to otherwise it will disappear from
the screen for various PIL/Tkinter/refcount quirks."""
return _bitmapViewer(root, self.__image, title)
def write(self, filename):
"""Write this bitmap to a file with the given filename. File type
is deduced from the extension (exception if it can't be figured
out)."""
self.__image.save(filename)
def pixelcode(self):
"""Return a new bitmap, twice as big linearly, by pixelcoding every
pixel of bmp into a grid of 4 pixels. Pixelcoding means translating
each pixel into a grid of pixels in a clever way which is the core
idea of visual cryptography. Read the poster for more on that."""
maxX, maxY = self.size()
result = bitmap((2*maxX, 2*maxY))
for x in range(maxX):
for y in range(maxY):
pixel = self.get(x,y)
result.set(2*x,2*y, pixel)
result.set(2*x,2*y+1, not pixel)
result.set(2*x+1,2*y, not pixel)
result.set(2*x+1,2*y+1, pixel)
return result
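# Editor's illustration (hypothetical helper, not part of the original module):
# the pixelcode() docstring above leaves the actual 2x2 rule implicit.  One
# plaintext pixel maps to a diagonal cell pattern, so any single share looks
# like uniform 50% grey and leaks nothing about the plaintext.
def _pixelcode_cell_example(pixel):
    """Return the flattened 2x2 cell [(2x,2y), (2x,2y+1), (2x+1,2y),
    (2x+1,2y+1)] that bitmap.pixelcode() writes for one pixel value."""
    p = 1 if pixel else 0
    return [p, 1 - p, 1 - p, p]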
def boolean(operation, bitmaps):
"""Apply the boolean operation 'operation' (a binary function of two
integers returning an integer) to the list of bitmaps in 'bitmaps'
(precondition: the list can't be empty and the bitmaps must all have
the same size) and return the resulting bitmap."""
maxX, maxY = size = bitmaps[0].size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
pixel = bitmaps[0].get(x,y)
for b in bitmaps[1:]:
pixel = apply(operation, (pixel, b.get(x,y)))
result.set(x,y,pixel)
return result
# Doc string for the following three functions:
# Take an arbitrary number (>=1) of bitmap arguments, all of the same size,
# and return another bitmap resulting from their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
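# Editor's sketch (assumption: the standard library's random.SystemRandom,
# which draws from os.urandom, is an acceptable stand-in for the "properly
# seeded cryptographically strong RNG" the warning above asks for; this helper
# is not part of the original module).
def _randomBitmapSystem(size):
    """Like randomBitmap(), but with an OS-backed random source."""
    rng = random.SystemRandom()
    b = bitmap(size)
    xmax, ymax = size
    for x in xrange(xmax):
        for y in xrange(ymax):
            b.set(x, y, rng.randint(0, 1))
    return b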
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
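# Editor's sketch (illustrative only, not part of the original module): the
# round trip described by encrypt() and decrypt().  Note the result is not
# pixel-identical to the plaintext: black pixels come back as fully black 2x2
# cells, white ones as half-black cells, so the overlay is readable but grey.
def _roundtrip_example(plaintextFile="vck.tif"):
    """Encrypt a bitmap against a fresh random pad, then simulate overlaying
    the two printed shares."""
    plaintext = bitmap(plaintextFile)
    ciphertext, pad = encrypt(plaintext)
    return decrypt(ciphertext, pad)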
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller away from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class | (_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
"""A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, low=0, high=mod-1):
"""Fill the moonfield with random values in the range min..max
inclusive. WARNING: NOT GOOD FOR REAL CRYPTO USE. Use a
cryptographically strong RNG instead of the library's unless you're
just playing around."""
def randomFiller(x,y, low=low, high=high):
return random.randint(low, high)
self.fill(randomFiller)
def imageComplement(self, img):
"""Precondition: self must have been filled already. Take a
greyscale image (PIL type "L"), which must have the same size as
self. Return a new moonfield such that, if that new moonfield and
the current one were superimposed, one would "see" the supplied
image. NB: if the supplied image parameter is a string, an attempt
is made to open the file of that name."""
if type(img) == type(""):
img = Image.open(img).convert("L")
assert self.size() == img.size
result = moonfield(size=(self.__xmax, self.__ymax))
def filler(x,y,i=img, d=self.__data, pi=self.discretePi, m=self.mod):
return (d[(x,y)] - (pi - i.getpixel((x,y)))) % m
result.fill(filler)
return result
def renderOnCanvas(self, canvas, radius=moonfieldViewer.R):
"""Take a canvas and render the moonfield on it. The radius of the
halfmoons must be specified in canvas units."""
for x in range(self.__xmax):
for y in range(self.__ymax):
# Make the halfmoon at x,y
canvas.create_arc(
radius*2*x, radius*2*y, radius*2*(x+1)-1, radius*2*(y+1)-1,
start = self.__data[(x,y)] * self.i2d, extent = 180.0,
fill="Black")
def view(self, root, title="No name", radius=moonfieldViewer.R):
"""Display this image in a toplevel window (optionally with the
given title). Preconditions: the moonfield must be filled; Tk must
have been initialised (the caller must supply Tk's root
window). Return the toplevel window, which the caller must hold on
to otherwise it will disappear from the screen for various
PIL/Tkinter/refcount quirks."""
return moonfieldViewer(root, self, title, radius)
def __repr__(self):
if self.__data == {}:
return "<uninitialised>"
result = ""
for y in range(self.__ymax):
for x in range(self.__xmax):
result = result + "%3d " % self.__data[(x,y)]
result = result + "\n"
return result
def dump(self, filename):
"""Dump yourself to a file in the internal .mfd format (another
moonfield object can later be made from such a file)."""
pickle.dump(self, open(filename, "w"))
def moonfield_undump(filename):
"""Return a moonfield obtained by rebuilding the one that had been
dumped to the given file."""
return pickle.load(open(filename))
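# Editor's worked example (helper not part of the original module): with
# discretePi = 255, a pad angle p and a target grey g (0 = black, 255 = white),
# imageComplement() stores c = (p - (255 - g)) % 510 in the second share.  The
# overlaid halfmoons are then (p - c) % 510 = 255 - g steps apart: g = 0 gives
# pi radians (a fully black disc), g = 255 gives coincident halfmoons (a
# half-white disc), matching the range discussion inside class moonfield.
def _halfmoon_pair_example(pad_angle, grey):
    """Return (ciphertext_angle, separation) for one cell, mirroring the
    filler used by moonfield.imageComplement()."""
    c = (pad_angle - (moonfield.discretePi - grey)) % moonfield.mod
    return c, (pad_angle - c) % moonfield.mod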
# --------------------------------------------------------------
# File-based mode of operation
def makePad(size, expandedPadFile="pad.tif", dumpFile="media/rawpad.pbm"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return the
raw and expanded bitmaps."""
rawPad = randomBitmap(size)
rawPad.write(dumpFile)
expandedPad = rawPad.pixelcode()
expandedPad.write(expandedPadFile)
return rawPad, expandedPad
def makeCryptograph(imageFile, codedFile="coded.tif", dumpFile="media/rawpad.pbm"):
"""Generate a cryptograph. Take a monochrome image (the filename of a
PIL type "1") and a file with a dump of a raw pad (Precondition: image
and raw pad must be of the same size in pixels.) Write out the
cryptograph as an image file. Return the bitmap for the cryptograph."""
print(os.getcwd())
pad = bitmap(dumpFile)
plaintext = bitmap(imageFile)
ciphertext = XOR(pad, plaintext)
expandedCiphertext = ciphertext.pixelcode()
expandedCiphertext.write(codedFile)
return expandedCiphertext
def splitImage(image, shareFile1="share1.tif", shareFile2="share2.tif"):
"""Not for spies, really, just for cute demos. Take a monochrome image
(a PIL type "1" or its filename) and produce two image files that, when
superimposed, will yield the image. Return the bitmaps for the two
shares."""
_, expandedPad = makePad(Image.open(image).size, shareFile1)
expandedCiphertext = makeCryptograph(str(image), shareFile2)
print(expandedPad,expandedCiphertext)
return expandedPad, expandedCiphertext
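# Editor's sketch (file names and size are illustrative, not mandated by the
# module): unlike splitImage(), the intended two-phase workflow issues the pad
# long before any message exists, so pad and plaintext are coupled only by an
# agreed image size.
def _two_phase_example(agreed_size=(128, 128)):
    """Phase 1 (agency): make and store a pad for the agreed size.  Phase 2
    (later): encrypt a plaintext of that size against the stored raw pad.
    Precondition: 'message.tif' (hypothetical) must be agreed_size pixels."""
    makePad(agreed_size, expandedPadFile="pad.tif", dumpFile="media/rawpad.pbm")
    return makeCryptograph("message.tif", codedFile="coded.tif",
                           dumpFile="media/rawpad.pbm")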
# And same again for greyscale... Note that here we HAVE to use windows,
# even if we want to run in batch mode, because without drawing the stuff
# on the canvas we can't generate the postscript (actually, seeing how
# messy it is to get the margins to come out right, I'm thinking that I
# perhaps ought to generate the postscript by hand, without any canvas,
# like I used to do in the old, deprecated C++ version of VCK...)
def makePadG(root, size, expandedPadFile="pad.ps", dumpFile="rawpad.mfd"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return a
pair made of the moonfield for the pad and a viewer on it."""
raw = moonfield(size)
raw.randomFill()
raw.dump(dumpFile)
v = raw.view(root)
v.psprint(expandedPadFile)
return raw, v
def makeCryptographG(root, image, codedFile="coded.ps", dumpFile="rawpad.mfd"):
"""Generate a cryptograph. Take an image (either a PIL image of type
"L" or a filename) and a file with a dump of a raw pad moonfield
(Precondition: image and raw pad must be of the same size in pixels.)
Write out the cryptograph as a postscript file of halfmoons. Return a
pair made of the moonfield for the cryptograph and a viewer on it."""
pad = moonfield_undump(dumpFile)
ciphertext = pad.imageComplement(image)
v = ciphertext.view(root)
v.psprint(codedFile)
return ciphertext, v
def splitImageG(root, image, shareFile1="share1.ps", shareFile2="share2.ps"):
"""Not for spies, really, just for cute demos. Take a greyscale image
(either an "L" image object or a filename) and produce two postscript
files of halfmoons that, when superimposed, will yield the
image. Return a quadruple made of the two shares and two viewers
showing them."""
if type(image) == type(""):
image = Image.open(image).convert("L")
p, v1 = makePadG(root, image.size, shareFile1)
c, v2 = makeCryptographG(root, image, shareFile2)
return p, c, v1, v2
# --------------------------------------------------------------
# Self-test
# Activate the test you want (one at a time) by uncommenting it in main().
def testEncryptDecrypt(root):
"""Encrypt a monochrome image and decrypt it, showing the results on
screen (work in memory, don't save to files)."""
plaintext = bitmap("vck.gif")
ciphertext, pad = encrypt(plaintext)
decryptedResult = decrypt(ciphertext, pad)
v1 = plaintext.view(root, "plaintext")
v2 = pad.view(root, "pad (pixelcoded)")
v3 = ciphertext.view(root, "ciphertext (pixelcoded)")
v4 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4
def testAllIntermediateValues(root):
"""Encrypt a monochrome image and decrypt it, but do it all "by hand"
and show all the intermediate results at each step."""
rawPlaintext = bitmap("vck.gif")
v1 = rawPlaintext.view(root, "raw plaintext")
rawPad = randomBitmap(rawPlaintext.size())
v2 = rawPad.view(root, "raw pad")
rawCiphertext = XOR(rawPlaintext, rawPad)
v3 = rawCiphertext.view(root, "raw ciphertext")
pad = rawPad.pixelcode()
v4 = pad.view(root, "pixelcoded pad")
ciphertext = rawCiphertext.pixelcode()
v5 = ciphertext.view(root, "pixelcoded ciphertext")
decryptedResult = OR(ciphertext, pad)
v6 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4, v5, v6
def testBooleanOps(root):
"""Demonstrate the boolean operations available in VCK by combining an
image (vck.tif must be in the current directory) with a diagonal
cross."""
letters = bitmap("vck.tif")
v1 = letters.view(root, "vck")
cross = bitmap(letters.size())
xmax, ymax = cross.size()
r = ymax*1.0/xmax
for x in range(xmax):
cross.set(x, x*r)
cross.set(x, x*r+1)
cross.set(x, x*r-1)
cross.set(x, ymax-x*r)
cross.set(x, ymax-x*r+1)
cross.set(x, ymax-x*r-1)
v2 = cross.view(root, "cross")
xorResult = XOR(letters, cross)
v3 = xorResult.view(root, "vck XOR cross")
orResult = OR(letters, cross)
v4 = orResult.view(root, "vck OR cross")
andResult = AND(letters, cross)
v5 = andResult.view(root, "vck AND cross")
notResult = NOT(letters)
v6 = notResult.view(root, "NOT vck")
return v1, v2, v3, v4, v5, v6
def testGrey(root):
"""Look at how the pie slices appear for a test card with all the
possible grey tones."""
# Make a greyscale test card: a 16x16 square going from black to white
t = open("testcard.pgm", "wb")
t.write("P5\n16 16\n255\n")
for i in range(256):
t.write(chr(i))
t.close()
plaintext = Image.open("testcard.pgm")
plaintext.convert("L")
mx,my = plaintext.size
pad = moonfield(size=(mx,my))
pad.randomFill()
v1 = pad.view(root, "random junk")
ciphertext = pad.imageComplement(plaintext)
v2 = ciphertext.view(root, "ciphertext")
v3 = ciphertext.view(root, "decrypted ciphertext")
pad.renderOnCanvas(v3.canvas())
return v1, v2, v3
def testSplitImage(root):
"""Split a monochrome image into two shares and write these to two
files that can be viewed externally."""
s1, s2 = splitImage("vck.tif")
v = OR(s1, s2).view(root)
return v
def testSplitImageG(root):
"""Split a greyscale image into two shares (postscript files)."""
p, c, v1, v2 = splitImageG(root, "guido.tif")
p.renderOnCanvas(v2.canvas())
v2.psprint("guido-decrypted.ps")
return v2
if __name__ == "__main__":
mainApp(testBooleanOps)
mainApp(testEncryptDecrypt)
# mainApp(testAllIntermediateValues)
# mainApp(testGrey)
# mainApp(testSplitImage)
# mainApp(testSplitImageG)
| moonfieldViewer | identifier_name |
vck.py | import Tkinter
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
#import whrandom
import random
import string
import pickle
import sys
import os
class bitmap:
"""A two-dimensional one-bit-deep bitmap suitable for VCK operations.
The coordinate system has 0,0 at NW and xmax, ymax at SE. The external
representation of the pixels, accessed via get() and set(), has white
paper as 0 and black ink as 1."""
# Protected static constants for the internal representation of the
# pixel colours. The internal representation for pixels is that of
# PIL's "1" format, where 0x00 is black and 0xff is white. No other
# pixel values are allowed. Yeah, this gives a conversion for every
# pixel access: so sue me! I just found it too confusing to have paper
# as 1 and ink as 0, especially when it came to doing the boolean ops
# on the images.
_pixelBlack = 0x00
_pixelWhite = 0xff
# Private members:
# __image = the image
# __draw = the PIL gadget you use to write on the image
def __init__(self, arg1, arg2=None):
"""The allowed forms for the constructor are:
1- vck.bitmap("image.tif")
...i.e. from a file name;
2- vck.bitmap((x,y))
...ie from a 2-tuple with the size; picture will be all white."""
self.__image = None
self.__draw = None
if type(arg1) == type(""):
# form 1
raw = Image.open(arg1)
self.__image = raw.convert("1")
elif type(arg1) == type((1,2)):
if arg2 == None:
# form 2
self.__image = Image.new("1", arg1, bitmap._pixelWhite)
if not self.__image:
raise TypeError, "Give me EITHER a filename OR a " \
"(width, height) size pair."
self.__draw = ImageDraw.ImageDraw(self.__image)
def set(self, x, y, colour=1):
"""Set the pixel at x, y to be of colour colour (default 1 = black
ink). Any colour value other than 0 (white paper) is taken to be 1
(black ink)."""
inkCol = None
if colour == 0:
# self.__draw.setink(bitmap._pixelWhite)
inkCol = bitmap._pixelWhite
else:
# self.__draw.setink(bitmap._pixelBlack)
inkCol = bitmap._pixelBlack
self.__draw.point((x, y), fill=inkCol)
def get(self, x, y):
"""Return the value of the pixel at x, y"""
return not self.__image.getpixel((x, y))
def size(self):
"""Return a 2-tuple (width, height) in pixels."""
return self.__image.size
def view(self, root, title="No name"):
"""Display this image in a toplevel window (optionally with the
given title). Precondition: Tk must have been initialised (the
caller must supply Tk's root window). Return the toplevel window,
which the caller must hold on to otherwise it will disappear from
the screen for various PIL/Tkinter/refcount quirks."""
return _bitmapViewer(root, self.__image, title)
def write(self, filename):
"""Write this bitmap to a file with the given filename. File type
is deduced from the extension (exception if it can't be figured
out)."""
self.__image.save(filename)
def pixelcode(self):
"""Return a new bitmap, twice as big linearly, by pixelcoding every
pixel of bmp into a grid of 4 pixels. Pixelcoding means translating
each pixel into a grid of pixels in a clever way which is the core
idea of visual cryptography. Read the poster for more on that."""
maxX, maxY = self.size()
result = bitmap((2*maxX, 2*maxY))
for x in range(maxX):
for y in range(maxY):
pixel = self.get(x,y)
result.set(2*x,2*y, pixel)
result.set(2*x,2*y+1, not pixel)
result.set(2*x+1,2*y, not pixel)
result.set(2*x+1,2*y+1, pixel)
return result
def boolean(operation, bitmaps):
"""Apply the boolean operation 'operation' (a binary function of two
integers returning an integer) to the list of bitmaps in 'bitmaps'
(precondition: the list can't be empty and the bitmaps must all have
the same size) and return the resulting bitmap."""
maxX, maxY = size = bitmaps[0].size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
pixel = bitmaps[0].get(x,y)
for b in bitmaps[1:]:
pixel = apply(operation, (pixel, b.get(x,y)))
result.set(x,y,pixel)
return result
# Doc string for the following three functions:
# Take an arbitrary number (>=1) of bitmap arguments, all of the same size,
# and return another bitmap resulting from their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
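# Editor's worked example (helper not part of the original module): psprint()
# above fits the drawing into the 196mm x 283mm usable area of an A4 page by
# pinning whichever page dimension would overflow first.
def _limiting_page_option(bbox_width, bbox_height, W=196.0, H=283.0):
    """Return the postscript option psprint() would set ('pagewidth' or
    'pageheight') for a canvas bounding box of the given size in points."""
    return "pagewidth" if bbox_width / W > bbox_height / H else "pageheight"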
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
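# Editor's note with an illustrative helper (not part of the original module):
# OR models physical superimposition because printed black ink covers white
# paper.  On pixelcoded shares this halves the contrast: a black plaintext
# pixel overlays to a fully black 2x2 cell, a white one to a half-black cell.
def _overlay_cell_example(plaintext_pixel, pad_pixel):
    """Return the four overlaid cell values (flattened 2x2) for one plaintext
    pixel and one raw pad pixel, mirroring what decrypt() does cell by cell."""
    cipher_pixel = plaintext_pixel ^ pad_pixel
    def cell(p):
        # same layout as bitmap.pixelcode(): the pixel value on the diagonal
        return [p, 1 - p, 1 - p, p]
    return [c | k for c, k in zip(cell(cipher_pixel), cell(pad_pixel))]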
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller away from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class moonfieldViewer(_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
"""A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
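# Worked example (added for illustration; the numbers are not in the
# original comment): a mid-grey pixel of value 128 ends up, via
# imageComplement() below, with an offset of discretePi - 128 = 127 steps
# between the two halfmoons, i.e. about 127 * i2d ~= 89.6 degrees, so
# roughly half a halfmoon of white stays uncovered and the overlaid cell
# reads back as mid-grey.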
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, low=0, high=mod-1):
"""Fill the moonfield with random values in the range min..max
inclusive. WARNING: NOT GOOD FOR REAL CRYPTO USE. Use a
cryptographically strong RNG instead of the library's unless you're
just playing around."""
def randomFiller(x,y, low=low, high=high):
return random.randint(low, high)
self.fill(randomFiller)
def imageComplement(self, img):
"""Precondition: self must have been filled already. Take a
greyscale image (PIL type "L"), which must have the same size as
self. Return a new moonfield such that, if that new moonfield and
the current one were superimposed, one would "see" the supplied
image. NB: if the supplied image parameter is a string, an attempt
is made to open the file of that name."""
if type(img) == type(""):
img = Image.open(img).convert("L")
assert self.size() == img.size
result = moonfield(size=(self.__xmax, self.__ymax))
def filler(x,y,i=img, d=self.__data, pi=self.discretePi, m=self.mod):
return (d[(x,y)] - (pi - i.getpixel((x,y)))) % m
result.fill(filler)
return result
def renderOnCanvas(self, canvas, radius=moonfieldViewer.R):
"""Take a canvas and render the moonfield on it. The radius of the
halfmoons must be specified in canvas units."""
for x in range(self.__xmax):
for y in range(self.__ymax):
# Make the halfmoon at x,y | fill="Black")
def view(self, root, title="No name", radius=moonfieldViewer.R):
"""Display this image in a toplevel window (optionally with the
given title). Preconditions: the moonfield must be filled; Tk must
have been initialised (the caller must supply Tk's root
window). Return the toplevel window, which the caller must hold on
to otherwise it will disappear from the screen for various
PIL/Tkinter/refcount quirks."""
return moonfieldViewer(root, self, title, radius)
def __repr__(self):
if self.__data == {}:
return "<uninitialised>"
result = ""
for y in range(self.__ymax):
for x in range(self.__xmax):
result = result + "%3d " % self.__data[(x,y)]
result = result + "\n"
return result
def dump(self, filename):
"""Dump yourself to a file in the internal .mfd format (another
moonfield object can later be made from such a file)."""
pickle.dump(self, open(filename, "w"))
def moonfield_undump(filename):
"""Return a moonfield obtained by rebuilding the one that had been
dumped to the given file."""
return pickle.load(open(filename))
# --------------------------------------------------------------
# File-based mode of operation
def makePad(size, expandedPadFile="pad.tif", dumpFile="media/rawpad.pbm"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return the
raw and expanded bitmaps."""
rawPad = randomBitmap(size)
rawPad.write(dumpFile)
expandedPad = rawPad.pixelcode()
expandedPad.write(expandedPadFile)
return rawPad, expandedPad
def makeCryptograph(imageFile, codedFile="coded.tif", dumpFile="media/rawpad.pbm"):
"""Generate a cryptograph. Take a monochrome image (the filename of a
PIL type "1") and a file with a dump of a raw pad (Precondition: image
and raw pad must be of the same size in pixels.) Write out the
cryptograph as an image file. Return the bitmap for the cryptograph."""
print(os.getcwd())
pad = bitmap(dumpFile)
plaintext = bitmap(imageFile)
ciphertext = XOR(pad, plaintext)
expandedCiphertext = ciphertext.pixelcode()
expandedCiphertext.write(codedFile)
return expandedCiphertext
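# Illustrative usage sketch (added for clarity; the file names below are
# invented, not part of the original code): the intended two-step flow is
# to issue the pad first and to encrypt against its raw dump later, e.g.
#
# rawPad, expandedPad = makePad((64, 64), "pad_for_007.tif", "media/rawpad.pbm")
# # ... later, when a message has to be sent ...
# makeCryptograph("orders.tif", "coded_orders.tif", "media/rawpad.pbm")
#
# where "orders.tif" must be a 64x64 monochrome image so that its size
# matches the dumped pad.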
def splitImage(image, shareFile1="share1.tif", shareFile2="share2.tif"):
"""Not for spies, really, just for cute demos. Take a monochrome image
(a PIL type "1" or its filename) and produce two image files that, when
superimposed, will yield the image. Return the bitmaps for the two
shares."""
_, expandedPad = makePad(Image.open(image).size, shareFile1)
expandedCiphertext = makeCryptograph(str(image), shareFile2)
print(expandedPad,expandedCiphertext)
return expandedPad, expandedCiphertext
# And same again for greyscale... Note that here we HAVE to use windows,
# even if we want to run in batch mode, because without drawing the stuff
# on the canvas we can't generate the postscript (actually, seeing how
# messy it is to get the margins to come out right, I'm thinking that I
# perhaps ought to generate the postscript by hand, without any canvas,
# like I used to do in the old, deprecated C++ version of VCK...)
def makePadG(root, size, expandedPadFile="pad.ps", dumpFile="rawpad.mfd"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return a
pair made of the moonfield for the pad and a viewer on it."""
raw = moonfield(size)
raw.randomFill()
raw.dump(dumpFile)
v = raw.view(root)
v.psprint(expandedPadFile)
return raw, v
def makeCryptographG(root, image, codedFile="coded.ps", dumpFile="rawpad.mfd"):
"""Generate a cryptograph. Take an image (either a PIL image of type
"L" or a filename) and a file with a dump of a raw pad moonfield
(Precondition: image and raw pad must be of the same size in pixels.)
Write out the cryptograph as a postscript file of halfmoons. Return a
pair made of the moonfield for the cryptograph and a viewer on it."""
pad = moonfield_undump(dumpFile)
ciphertext = pad.imageComplement(image)
v = ciphertext.view(root)
v.psprint(codedFile)
return ciphertext, v
def splitImageG(root, image, shareFile1="share1.ps", shareFile2="share2.ps"):
"""Not for spies, really, just for cute demos. Take a greyscale image
(either an "L" image object or a filename) and produce two postscript
files of halfmoons that, when superimposed, will yield the
image. Return a quadruple made of the two shares and two viewers
showing them."""
if type(image) == type(""):
image = Image.open(image).convert("L")
p, v1 = makePadG(root, image.size, shareFile1)
c, v2 = makeCryptographG(root, image, shareFile2)
return p, c, v1, v2
# --------------------------------------------------------------
# Self-test
# Activate the test you want (one at a time) by uncommenting it in main().
def testEncryptDecrypt(root):
"""Encrypt a monochrome image and decrypt it, showing the results on
screen (work in memory, don't save to files)."""
plaintext = bitmap("vck.gif")
ciphertext, pad = encrypt(plaintext)
decryptedResult = decrypt(ciphertext, pad)
v1 = plaintext.view(root, "plaintext")
v2 = pad.view(root, "pad (pixelcoded)")
v3 = ciphertext.view(root, "ciphertext (pixelcoded)")
v4 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4
def testAllIntermediateValues(root):
"""Encrypt a monochrome image and decrypt it, but do it all "by hand"
and show all the intermediate results at each step."""
rawPlaintext = bitmap("vck.gif")
v1 = rawPlaintext.view(root, "raw plaintext")
rawPad = randomBitmap(rawPlaintext.size())
v2 = rawPad.view(root, "raw pad")
rawCiphertext = XOR(rawPlaintext, rawPad)
v3 = rawCiphertext.view(root, "raw ciphertext")
pad = rawPad.pixelcode()
v4 = pad.view(root, "pixelcoded pad")
ciphertext = rawCiphertext.pixelcode()
v5 = ciphertext.view(root, "pixelcoded ciphertext")
decryptedResult = OR(ciphertext, pad)
v6 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4, v5, v6
def testBooleanOps(root):
"""Demonstrate the boolean operations available in VCK by combining an
image (vck.tif must be in the current directory) with a diagonal
cross."""
letters = bitmap("vck.tif")
v1 = letters.view(root, "vck")
cross = bitmap(letters.size())
xmax, ymax = cross.size()
r = ymax*1.0/xmax
for x in range(xmax):
cross.set(x, x*r)
cross.set(x, x*r+1)
cross.set(x, x*r-1)
cross.set(x, ymax-x*r)
cross.set(x, ymax-x*r+1)
cross.set(x, ymax-x*r-1)
v2 = cross.view(root, "cross")
xorResult = XOR(letters, cross)
v3 = xorResult.view(root, "vck XOR cross")
orResult = OR(letters, cross)
v4 = orResult.view(root, "vck OR cross")
andResult = AND(letters, cross)
v5 = andResult.view(root, "vck AND cross")
notResult = NOT(letters)
v6 = notResult.view(root, "NOT vck")
return v1, v2, v3, v4, v5, v6
def testGrey(root):
"""Look at how the pie slices appear for a test card with all the
possible grey tones."""
# Make a greyscale test card: a 16x16 square going from black to white
t = open("testcard.pgm", "wb")
t.write("P5\n16 16\n255\n")
for i in range(256):
t.write(chr(i))
t.close()
plaintext = Image.open("testcard.pgm")
plaintext.convert("L")
mx,my = plaintext.size
pad = moonfield(size=(mx,my))
pad.randomFill()
v1 = pad.view(root, "random junk")
ciphertext = pad.imageComplement(plaintext)
v2 = ciphertext.view(root, "ciphertext")
v3 = ciphertext.view(root, "decrypted ciphertext")
pad.renderOnCanvas(v3.canvas())
return v1, v2, v3
def testSplitImage(root):
"""Split a monochrome image into two shares and write these to two
files that can be viewed externally."""
s1, s2 = splitImage("vck.tif")
v = OR(s1, s2).view(root)
return v
def testSplitImageG(root):
"""Split a greyscale image into two shares (postscript files)."""
p, c, v1, v2 = splitImageG(root, "guido.tif")
p.renderOnCanvas(v2.canvas())
v2.psprint("guido-decrypted.ps")
return v2
if __name__ == "__main__":
mainApp(testBooleanOps)
mainApp(testEncryptDecrypt)
# mainApp(testAllIntermediateValues)
# mainApp(testGrey)
# mainApp(testSplitImage)
# mainApp(testSplitImageG) | canvas.create_arc(
radius*2*x, radius*2*y, radius*2*(x+1)-1, radius*2*(y+1)-1,
start = self.__data[(x,y)] * self.i2d, extent = 180.0, | random_line_split |
vck.py | import Tkinter
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
#import whrandom
import random
import string
import pickle
import sys
import os
class bitmap:
"""A two-dimensional one-bit-deep bitmap suitable for VCK operations.
The coordinate system has 0,0 at NW and xmax, ymax at SE. The external
representation of the pixels, accessed via get() and set(), has white
paper as 0 and black ink as 1."""
# Protected static constants for the internal representation of the
# pixel colours. The internal representation for pixels is that of
# PIL's "1" format, where 0x00 is black and 0xff is white. No other
# pixel values are allowed. Yeah, this gives a conversion for every
# pixel access: so sue me! I just found it too confusing to have paper
# as 1 and ink as 0, especially when it came to doing the boolean ops
# on the images.
_pixelBlack = 0x00
_pixelWhite = 0xff
# Private members:
# __image = the image
# __draw = the PIL gadget you use to write on the image
def __init__(self, arg1, arg2=None):
"""The allowed forms for the constructor are:
1- vck.bitmap("image.tif")
...i.e. from a file name;
2- vck.bitmap((x,y))
...i.e. from a 2-tuple with the size; picture will be all white."""
self.__image = None
self.__draw = None
if type(arg1) == type(""):
# form 1
raw = Image.open(arg1)
self.__image = raw.convert("1")
elif type(arg1) == type((1,2)):
if arg2 == None:
# form 2
self.__image = Image.new("1", arg1, bitmap._pixelWhite)
if not self.__image:
raise TypeError, "Give me EITHER a filename OR a " \
"(width, height) pair and an optional string of binary data."
self.__draw = ImageDraw.ImageDraw(self.__image)
def set(self, x, y, colour=1):
"""Set the pixel at x, y to be of colour colour (default 1 = black
ink). Any colour value other than 0 (white paper) is taken to be 1
(black ink)."""
inkCol = None
if colour == 0:
# self.__draw.setink(bitmap._pixelWhite)
inkCol = bitmap._pixelWhite
else:
# self.__draw.setink(bitmap._pixelBlack)
inkCol = bitmap._pixelBlack
self.__draw.point((x, y), fill=inkCol)
def get(self, x, y):
"""Return the value of the pixel at x, y"""
return not self.__image.getpixel((x, y))
def size(self):
"""Return a 2-tuple (width, height) in pixels."""
return self.__image.size
def view(self, root, title="No name"):
"""Display this image in a toplevel window (optionally with the
given title). Precondition: Tk must have been initialised (the
caller must supply Tk's root window). Return the toplevel window,
which the caller must hold on to otherwise it will disappear from
the screen for various PIL/Tkinter/refcount quirks."""
return _bitmapViewer(root, self.__image, title)
def write(self, filename):
"""Write this bitmap to a file with the given filename. File type
is deduced from the extension (exception if it can't be figured
out)."""
self.__image.save(filename)
def pixelcode(self):
"""Return a new bitmap, twice as big linearly, by pixelcoding every
pixel of bmp into a grid of 4 pixels. Pixelcoding means translating
each pixel into a grid of pixels in a clever way which is the core
idea of visual cryptography. Read the poster for more on that."""
maxX, maxY = self.size()
result = bitmap((2*maxX, 2*maxY))
for x in range(maxX):
for y in range(maxY):
pixel = self.get(x,y)
result.set(2*x,2*y, pixel)
result.set(2*x,2*y+1, not pixel)
result.set(2*x+1,2*y, not pixel)
result.set(2*x+1,2*y+1, pixel)
return result
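# Illustrative sketch (added for clarity; this helper is not part of the
# original VCK API): what pixelcode() does to a single pixel. A white
# pixel becomes one diagonal 2x2 checkerboard and a black pixel the
# opposite one, which is why OR-ing two matching shares leaves half the
# cell white while OR-ing two complementary shares blacks it out.
def _pixelcode_pattern_demo():
    w = bitmap((1, 1)) # a single white pixel
    b = bitmap((1, 1)) # a single black pixel
    b.set(0, 0, 1)
    flat = lambda bmp: [bmp.get(x, y) for y in range(2) for x in range(2)]
    assert flat(w.pixelcode()) == [0, 1, 1, 0]
    assert flat(b.pixelcode()) == [1, 0, 0, 1]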
def boolean(operation, bitmaps):
"""Apply the boolean operation 'operation' (a binary function of two
integers returning an integer) to the list of bitmaps in 'bitmaps'
(precondition: the list can't be empty and the bitmaps must all have
the same size) and return the resulting bitmap."""
maxX, maxY = size = bitmaps[0].size()
result = bitmap(size)
for x in range(maxX):
|
return result
# Doc string for the following three functions:
# Take an arbitrary number (>=1) of bitmap arguments, all of the same size,
# and return another bitmap resulting from their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
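# (Worked example, added for illustration: a 400x200-point canvas gives
# xscale = 400/196 ~= 2.04 and yscale = 200/283 ~= 0.71, so the width is
# the limiting direction; pagewidth is pinned to 196mm and the printed
# height comes out at about 98mm, well inside the 283mm available.)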
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class moonfieldViewer(_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
"""A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, low=0, high=mod-1):
"""Fill the moonfield with random values in the range min..max
inclusive. WARNING: NOT GOOD FOR REAL CRYPTO USE. Use a
cryptographically strong RNG instead of the library's unless you're
just playing around."""
def randomFiller(x,y, low=low, high=high):
return random.randint(low, high)
self.fill(randomFiller)
def imageComplement(self, img):
"""Precondition: self must have been filled already. Take a
greyscale image (PIL type "L"), which must have the same size as
self. Return a new moonfield such that, if that new moonfield and
the current one were superimposed, one would "see" the supplied
image. NB: if the supplied image parameter is a string, an attempt
is made to open the file of that name."""
if type(img) == type(""):
img = Image.open(img).convert("L")
assert self.size() == img.size
result = moonfield(size=(self.__xmax, self.__ymax))
def filler(x,y,i=img, d=self.__data, pi=self.discretePi, m=self.mod):
return (d[(x,y)] - (pi - i.getpixel((x,y)))) % m
result.fill(filler)
return result
def renderOnCanvas(self, canvas, radius=moonfieldViewer.R):
"""Take a canvas and render the moonfield on it. The radius of the
halfmoons must be specified in canvas units."""
for x in range(self.__xmax):
for y in range(self.__ymax):
# Make the halfmoon at x,y
canvas.create_arc(
radius*2*x, radius*2*y, radius*2*(x+1)-1, radius*2*(y+1)-1,
start = self.__data[(x,y)] * self.i2d, extent = 180.0,
fill="Black")
def view(self, root, title="No name", radius=moonfieldViewer.R):
"""Display this image in a toplevel window (optionally with the
given title). Preconditions: the moonfield must be filled; Tk must
have been initialised (the caller must supply Tk's root
window). Return the toplevel window, which the caller must hold on
to otherwise it will disappear from the screen for various
PIL/Tkinter/refcount quirks."""
return moonfieldViewer(root, self, title, radius)
def __repr__(self):
if self.__data == {}:
return "<uninitialised>"
result = ""
for y in range(self.__ymax):
for x in range(self.__xmax):
result = result + "%3d " % self.__data[(x,y)]
result = result + "\n"
return result
def dump(self, filename):
"""Dump yourself to a file in the internal .mfd format (another
moonfield object can later be made from such a file)."""
pickle.dump(self, open(filename, "w"))
def moonfield_undump(filename):
"""Return a moonfield obtained by rebuilding the one that had been
dumped to the given file."""
return pickle.load(open(filename))
# --------------------------------------------------------------
# File-based mode of operation
def makePad(size, expandedPadFile="pad.tif", dumpFile="media/rawpad.pbm"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return the
raw and expanded bitmaps."""
rawPad = randomBitmap(size)
rawPad.write(dumpFile)
expandedPad = rawPad.pixelcode()
expandedPad.write(expandedPadFile)
return rawPad, expandedPad
def makeCryptograph(imageFile, codedFile="coded.tif", dumpFile="media/rawpad.pbm"):
"""Generate a cryptograph. Take a monochrome image (the filename of a
PIL type "1") and a file with a dump of a raw pad (Precondition: image
and raw pad must be of the same size in pixels.) Write out the
cryptograph as an image file. Return the bitmap for the cryptograph."""
print(os.getcwd())
pad = bitmap(dumpFile)
plaintext = bitmap(imageFile)
ciphertext = XOR(pad, plaintext)
expandedCiphertext = ciphertext.pixelcode()
expandedCiphertext.write(codedFile)
return expandedCiphertext
def splitImage(image, shareFile1="share1.tif", shareFile2="share2.tif"):
"""Not for spies, really, just for cute demos. Take a monochrome image
(a PIL type "1" or its filename) and produce two image files that, when
superimposed, will yield the image. Return the bitmaps for the two
shares."""
_, expandedPad = makePad(Image.open(image).size, shareFile1)
expandedCiphertext = makeCryptograph(str(image), shareFile2)
print(expandedPad,expandedCiphertext)
return expandedPad, expandedCiphertext
# And same again for greyscale... Note that here we HAVE to use windows,
# even if we want to run in batch mode, because without drawing the stuff
# on the canvas we can't generate the postscript (actually, seeing how
# messy it is to get the margins to come out right, I'm thinking that I
# perhaps ought to generate the postscript by hand, without any canvas,
# like I used to do in the old, deprecated C++ version of VCK...)
def makePadG(root, size, expandedPadFile="pad.ps", dumpFile="rawpad.mfd"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return a
pair made of the moonfield for the pad and a viewer on it."""
raw = moonfield(size)
raw.randomFill()
raw.dump(dumpFile)
v = raw.view(root)
v.psprint(expandedPadFile)
return raw, v
def makeCryptographG(root, image, codedFile="coded.ps", dumpFile="rawpad.mfd"):
"""Generate a cryptograph. Take an image (either a PIL image of type
"L" or a filename) and a file with a dump of a raw pad moonfield
(Precondition: image and raw pad must be of the same size in pixels.)
Write out the cryptograph as a postscript file of halfmoons. Return a
pair made of the moonfield for the cryptograph and a viewer on it."""
pad = moonfield_undump(dumpFile)
ciphertext = pad.imageComplement(image)
v = ciphertext.view(root)
v.psprint(codedFile)
return ciphertext, v
def splitImageG(root, image, shareFile1="share1.ps", shareFile2="share2.ps"):
"""Not for spies, really, just for cute demos. Take a greyscale image
(either an "L" image object or a filename) and produce two postscript
files of halfmoons that, when superimposed, will yield the
image. Return a quadruple made of the two shares and two viewers
showing them."""
if type(image) == type(""):
image = Image.open(image).convert("L")
p, v1 = makePadG(root, image.size, shareFile1)
c, v2 = makeCryptographG(root, image, shareFile2)
return p, c, v1, v2
# --------------------------------------------------------------
# Self-test
# Activate the test you want (one at a time) by uncommenting it in main().
def testEncryptDecrypt(root):
"""Encrypt a monochrome image and decrypt it, showing the results on
screen (work in memory, don't save to files)."""
plaintext = bitmap("vck.gif")
ciphertext, pad = encrypt(plaintext)
decryptedResult = decrypt(ciphertext, pad)
v1 = plaintext.view(root, "plaintext")
v2 = pad.view(root, "pad (pixelcoded)")
v3 = ciphertext.view(root, "ciphertext (pixelcoded)")
v4 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4
def testAllIntermediateValues(root):
"""Encrypt a monochrome image and decrypt it, but do it all "by hand"
and show all the intermediate results at each step."""
rawPlaintext = bitmap("vck.gif")
v1 = rawPlaintext.view(root, "raw plaintext")
rawPad = randomBitmap(rawPlaintext.size())
v2 = rawPad.view(root, "raw pad")
rawCiphertext = XOR(rawPlaintext, rawPad)
v3 = rawCiphertext.view(root, "raw ciphertext")
pad = rawPad.pixelcode()
v4 = pad.view(root, "pixelcoded pad")
ciphertext = rawCiphertext.pixelcode()
v5 = ciphertext.view(root, "pixelcoded ciphertext")
decryptedResult = OR(ciphertext, pad)
v6 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4, v5, v6
def testBooleanOps(root):
"""Demonstrate the boolean operations available in VCK by combining an
image (vck.tif must be in the current directory) with a diagonal
cross."""
letters = bitmap("vck.tif")
v1 = letters.view(root, "vck")
cross = bitmap(letters.size())
xmax, ymax = cross.size()
r = ymax*1.0/xmax
for x in range(xmax):
cross.set(x, x*r)
cross.set(x, x*r+1)
cross.set(x, x*r-1)
cross.set(x, ymax-x*r)
cross.set(x, ymax-x*r+1)
cross.set(x, ymax-x*r-1)
v2 = cross.view(root, "cross")
xorResult = XOR(letters, cross)
v3 = xorResult.view(root, "vck XOR cross")
orResult = OR(letters, cross)
v4 = orResult.view(root, "vck OR cross")
andResult = AND(letters, cross)
v5 = andResult.view(root, "vck AND cross")
notResult = NOT(letters)
v6 = notResult.view(root, "NOT vck")
return v1, v2, v3, v4, v5, v6
def testGrey(root):
"""Look at how the pie slices appear for a test card with all the
possible grey tones."""
# Make a greyscale test card: a 16x16 square going from black to white
t = open("testcard.pgm", "wb")
t.write("P5\n16 16\n255\n")
for i in range(256):
t.write(chr(i))
t.close()
plaintext = Image.open("testcard.pgm")
plaintext.convert("L")
mx,my = plaintext.size
pad = moonfield(size=(mx,my))
pad.randomFill()
v1 = pad.view(root, "random junk")
ciphertext = pad.imageComplement(plaintext)
v2 = ciphertext.view(root, "ciphertext")
v3 = ciphertext.view(root, "decrypted ciphertext")
pad.renderOnCanvas(v3.canvas())
return v1, v2, v3
def testSplitImage(root):
"""Split a monochrome image into two shares and write these to two
files that can be viewed externally."""
s1, s2 = splitImage("vck.tif")
v = OR(s1, s2).view(root)
return v
def testSplitImageG(root):
"""Split a greyscale image into two shares (postscript files)."""
p, c, v1, v2 = splitImageG(root, "guido.tif")
p.renderOnCanvas(v2.canvas())
v2.psprint("guido-decrypted.ps")
return v2
if __name__ == "__main__":
mainApp(testBooleanOps)
mainApp(testEncryptDecrypt)
# mainApp(testAllIntermediateValues)
# mainApp(testGrey)
# mainApp(testSplitImage)
# mainApp(testSplitImageG)
| for y in range(maxY):
pixel = bitmaps[0].get(x,y)
for b in bitmaps[1:]:
pixel = apply(operation, (pixel, b.get(x,y)))
result.set(x,y,pixel) | conditional_block |
vck.py | import Tkinter
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
#import whrandom
import random
import string
import pickle
import sys
import os
class bitmap:
"""A two-dimensional one-bit-deep bitmap suitable for VCK operations.
The coordinate system has 0,0 at NW and xmax, ymax at SE. The external
representation of the pixels, accessed via get() and set(), has white
paper as 0 and black ink as 1."""
# Protected static constants for the internal representation of the
# pixel colours. The internal representation for pixels is that of
# PIL's "1" format, where 0x00 is black and 0xff is white. No other
# pixel values are allowed. Yeah, this gives a conversion for every
# pixel access: so sue me! I just found it too confusing to have paper
# as 1 and ink as 0, especially when it came to doing the boolean ops
# on the images.
_pixelBlack = 0x00
_pixelWhite = 0xff
# Private members:
# __image = the image
# __draw = the PIL gadget you use to write on the image
def __init__(self, arg1, arg2=None):
"""The allowed forms for the constructor are:
1- vck.bitmap("image.tif")
...i.e. from a file name;
2- vck.bitmap((x,y))
...i.e. from a 2-tuple with the size; picture will be all white."""
self.__image = None
self.__draw = None
if type(arg1) == type(""):
# form 1
raw = Image.open(arg1)
self.__image = raw.convert("1")
elif type(arg1) == type((1,2)):
if arg2 == None:
# form 2
self.__image = Image.new("1", arg1, bitmap._pixelWhite)
if not self.__image:
raise TypeError, "Give me EITHER a filename OR a " \
"(width, height) pair and an optional string of binary data."
self.__draw = ImageDraw.ImageDraw(self.__image)
def set(self, x, y, colour=1):
"""Set the pixel at x, y to be of colour colour (default 1 = black
ink). Any colour value other than 0 (white paper) is taken to be 1
(black ink)."""
inkCol = None
if colour == 0:
# self.__draw.setink(bitmap._pixelWhite)
inkCol = bitmap._pixelWhite
else:
# self.__draw.setink(bitmap._pixelBlack)
inkCol = bitmap._pixelBlack
self.__draw.point((x, y), fill=inkCol)
def get(self, x, y):
"""Return the value of the pixel at x, y"""
return not self.__image.getpixel((x, y))
def size(self):
"""Return a 2-tuple (width, height) in pixels."""
return self.__image.size
def view(self, root, title="No name"):
"""Display this image in a toplevel window (optionally with the
given title). Precondition: Tk must have been initialised (the
caller must supply Tk's root window). Return the toplevel window,
which the caller must hold on to otherwise it will disappear from
the screen for various PIL/Tkinter/refcount quirks."""
return _bitmapViewer(root, self.__image, title)
def write(self, filename):
"""Write this bitmap to a file with the given filename. File type
is deduced from the extension (exception if it can't be figured
out)."""
self.__image.save(filename)
def pixelcode(self):
"""Return a new bitmap, twice as big linearly, by pixelcoding every
pixel of bmp into a grid of 4 pixels. Pixelcoding means translating
each pixel into a grid of pixels in a clever way which is the core
idea of visual cryptography. Read the poster for more on that."""
maxX, maxY = self.size()
result = bitmap((2*maxX, 2*maxY))
for x in range(maxX):
for y in range(maxY):
pixel = self.get(x,y)
result.set(2*x,2*y, pixel)
result.set(2*x,2*y+1, not pixel)
result.set(2*x+1,2*y, not pixel)
result.set(2*x+1,2*y+1, pixel)
return result
def boolean(operation, bitmaps):
"""Apply the boolean operation 'operation' (a binary function of two
integers returning an integer) to the list of bitmaps in 'bitmaps'
(precondition: the list can't be empty and the bitmaps must all have
the same size) and return the resulting bitmap."""
maxX, maxY = size = bitmaps[0].size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
pixel = bitmaps[0].get(x,y)
for b in bitmaps[1:]:
pixel = apply(operation, (pixel, b.get(x,y)))
result.set(x,y,pixel)
return result
# Doc string for the following three functions:
# Take an arbitrary number (>=1) of bitmap arguments, all of the same size,
# and return another bitmap resulting from their pixel-by-pixel AND, OR or
# XOR as appropriate.
def AND(*args): return boolean(lambda a,b:a&b, args)
def OR(*args): return boolean(lambda a,b:a|b, args)
def XOR(*args): return boolean(lambda a,b:a^b, args)
def NOT(bmp):
"""Take a bitmap and return its negative (obtained by swopping white
and black at each pixel)."""
maxX, maxY = size = bmp.size()
result = bitmap(size)
for x in range(maxX):
for y in range(maxY):
result.set(x,y, not bmp.get(x,y))
return result
def randomBitmap(size):
"""Take a size (2-tuple of x and y) and return a bitmap of that size
filled with random pixels. WARNING! THE CODE HERE IS ONLY FOR
DEMONSTRATION PURPOSES, SINCE IT CALLS THE STANDARD PYTHON RANDOM
NUMBER GENERATOR, which is fine for statistics but not good enough for
crypto. For real use, substitute this with really random data from an
external source, or at least with a properly seeded cryptographically
strong RNG."""
b = bitmap(size)
xmax, ymax = size
for x in xrange(xmax):
for y in xrange(ymax):
b.set(x, y, random.randint(0,1))
return b
class _viewer:
"""A toplevel window with a canvas."""
def __init__(self, root, width, height, title="Unnamed VCK image"):
self.__width = width
self.__height = height
self._t = Tkinter.Toplevel(root)
Tkinter.Wm.title(self._t, title)
self._c = Tkinter.Canvas(self._t, width=width, height=height,
border=0, highlightthickness=0,
background="White")
self._c.pack()
self._t.update()
def psprint(self, filename):
"""Write a postscript representation of the canvas to the specified
file."""
# The portrait A4 page is, in mm, WxH=210x297. Let's have a safety
# margin of 7mm all around it, and the usable area becomes 196x283.
W = 196.0
H = 283.0
x1, y1, x2, y2 = self._c.bbox("all")
options = {
"pageanchor": "sw",
"x": "%fp" % x1,
"y": "%fp" % y1,
"height": "%fp" % (y2-y1),
"width": "%fp" % (x2-x1),
"pagex": "0",
"pagey": "0",
"file": filename,
"colormode": "mono",
}
# ??? I think I'm doing all this viewport math sensibly, BUT I
# still get a weird asymmetric margin around the thing, and I
# haven't got a clue how to get rid of it.
yscale = (y2-y1) / H
xscale = (x2-x1) / W
# The direction with the greater scaling factor is the limiting one
if xscale > yscale:
options["pagewidth"] = "%fm" % W
else:
options["pageheight"] ="%fm" % H
self._c.update()
apply(self._c.postscript, (), options)
def canvas(self):
"""Return the canvas."""
return self._c
def __del__(self):
self._t.destroy()
class _bitmapViewer(_viewer):
"""A viewer for bitmaps."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.BitmapImage(
image, background="Black", foreground="White")
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
def encrypt(rawPlaintext, rawPad = None):
"""Take a plaintext bitmap and, optionally, a supposedly random pad of
the same size (one will be made up on the spot if not supplied). Return
a 2-tuple containing the large pixelcoded versions of ciphertext and
pad."""
# The raw versions are the same size as the original rawPlaintext
if not rawPad:
rawPad = randomBitmap(rawPlaintext.size())
rawCiphertext = XOR(rawPlaintext, rawPad)
# The final versions are linearly twice as big due to pixelcoding
ciphertext = rawCiphertext.pixelcode()
pad = rawPad.pixelcode()
return ciphertext, pad
def decrypt(ciphertext, pad):
"""Actually the decription ought to be performed without a computer
(the whole point of visual cryptography), by just superimposing the
transparencies of the ciphertext and pad. This is a simulation of this
process."""
return OR(ciphertext, pad)
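# Illustrative sketch (added for clarity; this helper is not part of the
# original VCK API): the round trip that encrypt()/decrypt() rely on.
# XOR-ing with a random pad hides the plaintext; in the pixelcoded domain,
# OR-ing the two transparencies plays the role of the second XOR, at the
# price of white areas decrypting to a 50% grey texture.
def _roundtrip_demo():
    plain = bitmap((4, 4))
    plain.set(1, 1)
    plain.set(2, 2)
    ciphertext, pad = encrypt(plain)
    stacked = decrypt(ciphertext, pad) # simulated overlay of the two shares
    for x in range(4):
        for y in range(4):
            if plain.get(x, y):
                # every black plaintext pixel becomes a fully black 2x2 block
                assert stacked.get(2*x, 2*y) and stacked.get(2*x + 1, 2*y)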
def mainApp(function):
"""Execute the supplied function. The function may create new windows
by calling bitmap.view() or by making instances of viewer, but it must
return a list of any such windows it makes. The point of this wrapper
is merely to shield the caller from the quirks of initialising
Tkinter, running its main loop and ensuring that windows don't
disappear unexpectedly."""
root = Tkinter.Tk()
quit = Tkinter.Button(root, text="Quit", command=root.quit)
quit.pack()
Tkinter.Wm.title(root, "VCK main")
windows = function(root)
root.update()
root.mainloop()
# --------------------------------------------------------------
# Analog (greyscale) version
class moonfieldViewer(_viewer):
"""A toplevel window with a canvas, suitable for viewing a moonfield."""
R = 9 # default radius
def __init__(self, root, mf, title="Unnamed moonfield", radius=R):
"""Precondition: the moonfield mf must be filled."""
xmax, ymax = mf.size()
_viewer.__init__(self, root, xmax*2*radius, ymax*2*radius, title)
mf.renderOnCanvas(self._c, radius)
self._t.update()
class photoViewer(_viewer):
"""A viewer for greyscale images."""
def __init__(self, root, image, title="Unnamed VCK image"):
width, height = image.size
_viewer.__init__(self, root, width, height, title)
self.__photo = ImageTk.PhotoImage(
image)
self._c.create_image(0, 0, anchor=Tkinter.NW, image=self.__photo)
self._t.update()
class moonfield:
|
def moonfield_undump(filename):
"""Return a moonfield obtained by rebuilding the one that had been
dumped to the given file."""
return pickle.load(open(filename))
# --------------------------------------------------------------
# File-based mode of operation
def makePad(size, expandedPadFile="pad.tif", dumpFile="media/rawpad.pbm"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return the
raw and expanded bitmaps."""
rawPad = randomBitmap(size)
rawPad.write(dumpFile)
expandedPad = rawPad.pixelcode()
expandedPad.write(expandedPadFile)
return rawPad, expandedPad
def makeCryptograph(imageFile, codedFile="coded.tif", dumpFile="media/rawpad.pbm"):
"""Generate a cryptograph. Take a monochrome image (the filename of a
PIL type "1") and a file with a dump of a raw pad (Precondition: image
and raw pad must be of the same size in pixels.) Write out the
cryptograph as an image file. Return the bitmap for the cryptograph."""
print(os.getcwd())
pad = bitmap(dumpFile)
plaintext = bitmap(imageFile)
ciphertext = XOR(pad, plaintext)
expandedCiphertext = ciphertext.pixelcode()
expandedCiphertext.write(codedFile)
return expandedCiphertext
def splitImage(image, shareFile1="share1.tif", shareFile2="share2.tif"):
"""Not for spies, really, just for cute demos. Take a monochrome image
(a PIL type "1" or its filename) and produce two image files that, when
superimposed, will yield the image. Return the bitmaps for the two
shares."""
_, expandedPad = makePad(Image.open(image).size, shareFile1)
expandedCiphertext = makeCryptograph(str(image), shareFile2)
print(expandedPad,expandedCiphertext)
return expandedPad, expandedCiphertext
# And same again for greyscale... Note that here we HAVE to use windows,
# even if we want to run in batch mode, because without drawing the stuff
# on the canvas we can't generate the postscript (actually, seeing how
# messy it is to get the margins to come out right, I'm thinking that I
# perhaps ought to generate the postscript by hand, without any canvas,
# like I used to do in the old, deprecated C++ version of VCK...)
def makePadG(root, size, expandedPadFile="pad.ps", dumpFile="rawpad.mfd"):
"""Generate a random pad. (NB: remember that the RNG used here is only
good for demos since it's not cryptographically strong!) Write out two
files with the supplied names, one with the dump of the pad in raw form
(necessary for encrypting later, to be kept at the agency) and one with
the pad in expanded form, ready for use, to be given to 007. Return a
pair made of the moonfield for the pad and a viewer on it."""
raw = moonfield(size)
raw.randomFill()
raw.dump(dumpFile)
v = raw.view(root)
v.psprint(expandedPadFile)
return raw, v
def makeCryptographG(root, image, codedFile="coded.ps", dumpFile="rawpad.mfd"):
"""Generate a cryptograph. Take an image (either a PIL image of type
"L" or a filename) and a file with a dump of a raw pad moonfield
(Precondition: image and raw pad must be of the same size in pixels.)
Write out the cryptograph as a postscript file of halfmoons. Return a
pair made of the moonfield for the cryptograph and a viewer on it."""
pad = moonfield_undump(dumpFile)
ciphertext = pad.imageComplement(image)
v = ciphertext.view(root)
v.psprint(codedFile)
return ciphertext, v
def splitImageG(root, image, shareFile1="share1.ps", shareFile2="share2.ps"):
"""Not for spies, really, just for cute demos. Take a greyscale image
(either an "L" image object or a filename) and produce two postscript
files of halfmoons that, when superimposed, will yield the
image. Return a quadruple made of the two shares and two viewers
showing them."""
if type(image) == type(""):
image = Image.open(image).convert("L")
p, v1 = makePadG(root, image.size, shareFile1)
c, v2 = makeCryptographG(root, image, shareFile2)
return p, c, v1, v2
# --------------------------------------------------------------
# Self-test
# Activate the test you want (one at a time) by uncommenting it in main().
def testEncryptDecrypt(root):
"""Encrypt a monochrome image and decrypt it, showing the results on
screen (work in memory, don't save to files)."""
plaintext = bitmap("vck.gif")
ciphertext, pad = encrypt(plaintext)
decryptedResult = decrypt(ciphertext, pad)
v1 = plaintext.view(root, "plaintext")
v2 = pad.view(root, "pad (pixelcoded)")
v3 = ciphertext.view(root, "ciphertext (pixelcoded)")
v4 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4
def testAllIntermediateValues(root):
"""Encrypt a monochrome image and decrypt it, but do it all "by hand"
and show all the intermediate results at each step."""
rawPlaintext = bitmap("vck.gif")
v1 = rawPlaintext.view(root, "raw plaintext")
rawPad = randomBitmap(rawPlaintext.size())
v2 = rawPad.view(root, "raw pad")
rawCiphertext = XOR(rawPlaintext, rawPad)
v3 = rawCiphertext.view(root, "raw ciphertext")
pad = rawPad.pixelcode()
v4 = pad.view(root, "pixelcoded pad")
ciphertext = rawCiphertext.pixelcode()
v5 = ciphertext.view(root, "pixelcoded ciphertext")
decryptedResult = OR(ciphertext, pad)
v6 = decryptedResult.view(root, "decrypted result")
return v1, v2, v3, v4, v5, v6
def testBooleanOps(root):
"""Demonstrate the boolean operations available in VCK by combining an
image (vck.tif must be in the current directory) with a diagonal
cross."""
letters = bitmap("vck.tif")
v1 = letters.view(root, "vck")
cross = bitmap(letters.size())
xmax, ymax = cross.size()
r = ymax*1.0/xmax
for x in range(xmax):
cross.set(x, x*r)
cross.set(x, x*r+1)
cross.set(x, x*r-1)
cross.set(x, ymax-x*r)
cross.set(x, ymax-x*r+1)
cross.set(x, ymax-x*r-1)
v2 = cross.view(root, "cross")
xorResult = XOR(letters, cross)
v3 = xorResult.view(root, "vck XOR cross")
orResult = OR(letters, cross)
v4 = orResult.view(root, "vck OR cross")
andResult = AND(letters, cross)
v5 = andResult.view(root, "vck AND cross")
notResult = NOT(letters)
v6 = notResult.view(root, "NOT vck")
return v1, v2, v3, v4, v5, v6
def testGrey(root):
"""Look at how the pie slices appear for a test card with all the
possible grey tones."""
# Make a greyscale test card: a 16x16 square going from black to white
t = open("testcard.pgm", "wb")
t.write("P5\n16 16\n255\n")
for i in range(256):
t.write(chr(i))
t.close()
plaintext = Image.open("testcard.pgm")
plaintext.convert("L")
mx,my = plaintext.size
pad = moonfield(size=(mx,my))
pad.randomFill()
v1 = pad.view(root, "random junk")
ciphertext = pad.imageComplement(plaintext)
v2 = ciphertext.view(root, "ciphertext")
v3 = ciphertext.view(root, "decrypted ciphertext")
pad.renderOnCanvas(v3.canvas())
return v1, v2, v3
def testSplitImage(root):
"""Split a monochrome image into two shares and write these to two
files that can be viewed externally."""
s1, s2 = splitImage("vck.tif")
v = OR(s1, s2).view(root)
return v
def testSplitImageG(root):
"""Split a greyscale image into two shares (postscript files)."""
p, c, v1, v2 = splitImageG(root, "guido.tif")
p.renderOnCanvas(v2.canvas())
v2.psprint("guido-decrypted.ps")
return v2
if __name__ == "__main__":
mainApp(testBooleanOps)
mainApp(testEncryptDecrypt)
# mainApp(testAllIntermediateValues)
# mainApp(testGrey)
# mainApp(testSplitImage)
# mainApp(testSplitImageG)
| """A 2d array of angles. Items in the array are indexed by integers in
0..xmax, 0..ymax, with 0,0 being the NW corner. Each angle specifies
the phase (rotation) of a black halfmoon around its centre (determined
by its place in the array) and is represented by an integer in the
range 0..509"""
# Why that strange range? Well, since we are going to use two rotated
# halfmoons to display a luminosity, and since the luminosity of the
# gap between the two halfmoons ranges from 255 (white) when they're 0
# radians apart (i.e. superimposed, leaving a half-moon of white) to 0
# (black) when they're pi radians apart (i.e. non-overlapping, covering
# the whole disc with black), this means that there are 255 discrete
# steps in pi (not 256, because the 256th step is already "the first of
# the other half"), and 2*255 in 2*pi. So the integers in a moonfield
# range from 0 to 2*255-1 = 509. And we use arithmetic modulo 510 on
# them.
discretePi = 255
mod = discretePi*2
i2d = 360.0 / mod # integer to degree conversion factor
def __init__(self, size, filler=None):
"""Make a moonfield of the specified size. If a filler function is
specified, fill it with it, otherwise leave the data
uninitialised."""
self.__data = {}
self.__xmax, self.__ymax = size
if filler:
self.fill(filler)
def size(self):
"""Return a 2-tuple with the dimensions of the moonfield."""
return self.__xmax, self.__ymax
def fill(self, filler):
"""Take a function f(x,y) that accepts a position in the moonfield
and returns an integer value. Fill every cell in the moonfield with
the value returned by the filler (taken modulo mod)."""
for x in range(self.__xmax):
for y in range(self.__ymax):
self.__data[(x,y)] = filler(x,y) % self.mod
def randomFill(self, low=0, high=mod-1):
"""Fill the moonfield with random values in the range min..max
inclusive. WARNING: NOT GOOD FOR REAL CRYPTO USE. Use a
cryptographically strong RNG instead of the library's unless you're
just playing around."""
def randomFiller(x,y, low=low, high=high):
return random.randint(low, high)
self.fill(randomFiller)
def imageComplement(self, img):
"""Precondition: self must have been filled already. Take a
greyscale image (PIL type "L"), which must have the same size as
self. Return a new moonfield such that, if that new moonfield and
the current one were superimposed, one would "see" the supplied
image. NB: if the supplied image parameter is a string, an attempt
is made to open the file of that name."""
if type(img) == type(""):
img = Image.open(img).convert("L")
assert self.size() == img.size
result = moonfield(size=(self.__xmax, self.__ymax))
def filler(x,y,i=img, d=self.__data, pi=self.discretePi, m=self.mod):
return (d[(x,y)] - (pi - i.getpixel((x,y)))) % m
result.fill(filler)
return result
def renderOnCanvas(self, canvas, radius=moonfieldViewer.R):
"""Take a canvas and render the moonfield on it. The radius of the
halfmoons must be specified in canvas units."""
for x in range(self.__xmax):
for y in range(self.__ymax):
# Make the halfmoon at x,y
canvas.create_arc(
radius*2*x, radius*2*y, radius*2*(x+1)-1, radius*2*(y+1)-1,
start = self.__data[(x,y)] * self.i2d, extent = 180.0,
fill="Black")
def view(self, root, title="No name", radius=moonfieldViewer.R):
"""Display this image in a toplevel window (optionally with the
given title). Preconditions: the moonfield must be filled; Tk must
have been initialised (the caller must supply Tk's root
window). Return the toplevel window, which the caller must hold on
to otherwise it will disappear from the screen for various
PIL/Tkinter/refcount quirks."""
return moonfieldViewer(root, self, title, radius)
def __repr__(self):
if self.__data == {}:
return "<uninitialised>"
result = ""
for y in range(self.__ymax):
for x in range(self.__xmax):
result = result + "%3d " % self.__data[(x,y)]
result = result + "\n"
return result
def dump(self, filename):
"""Dump yourself to a file in the internal .mfd format (another
moonfield object can later be made from such a file)."""
pickle.dump(self, open(filename, "w")) | identifier_body |
lib.rs | /*!
This crate implements various macros detailed in [The Little Book of Rust Macros](https://danielkeep.github.io/tlborm/).
If you use selective macro importing, you should make sure to *always* use the `tlborm_util` macro, as most macros in this crate depend on it being present.
*/
/**
Forces the parser to interpret this macro's argument as an expression, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use] extern crate tlborm;
# fn main() {
assert_eq!(as_expr!(42), 42);
macro_rules! conceal_as_tts {
// The `tt` substitution will break regular parsing.
(passthru, $($tts:tt)*) => {$($tts)*};
($callback:ident, $($tts:tt)*) => {$callback!($($tts)*)};
}
assert_eq!(conceal_as_tts!(as_expr, 2 * (3 + 4)), 14);
# }
```
The following will *not* compile:
<!-- NO-FAILING-TESTS -->
```ignore
# #[macro_use(as_expr, tlborm_util)] extern crate tlborm;
# fn main() {
# macro_rules! conceal_as_tts {
# (passthru, $($tts:tt)*) => {$($tts)*};
# ($callback:ident, $($tts:tt)*) => {$callback!($($tts)*)};
# }
assert_eq!(conceal_as_tts!(passthru, 2 * (3 + 4)), 14);
# }
```
*/
#[macro_export]
macro_rules! as_expr { ($e:expr) => {$e} }
/**
Forces the parser to interpret this macro's argument as an item, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_item, tlborm_util)] extern crate tlborm;
macro_rules! enoom {
($name:ident { $($body:tt)* }) => {
as_item! {
// The `tt` substitution breaks regular parsing.
enum $name { $($body)* }
}
}
}
enoom! {
Dash { Solid, Dash, Dot }
}
# fn main() {}
```
*/
#[macro_export]
macro_rules! as_item { ($i:item) => {$i} }
/**
Forces the parser to interpret this macro's argument as a pattern, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_pat, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! tuple_pat {
($($names:tt)*) => {
// The `tt` substitution breaks regular parsing.
as_pat!( ( $($names,)* ) )
}
}
match (1, 2, 3) {
tuple_pat!(a b c) => assert_eq!((a, b, c), (1, 2, 3))
}
# }
```
*/
#[macro_export]
macro_rules! as_pat { ($p:pat) => {$p} }
/**
Forces the parser to interpret this macro's argument as a statement, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_stmt, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! let_stmt {
($name:tt = $($init:tt)*) => {
// The `tt` substitution breaks regular parsing.
as_stmt!(let $name = $($init)*);
}
}
let_stmt!(x = 42);
assert_eq!(x, 42);
# }
```
*/
#[macro_export]
macro_rules! as_stmt { ($s:stmt) => {$s} }
/**
Expands to the number of identifiers provided. The expansion is suitable for use in a constant expression, and is of type `u32`.
The identifiers provided **must** be mutually unique; *i.e.* there cannot be any repeated identifiers. In addition, the identifier `__CountIdentsLast` **must not** be used in the invocation. This macro should be usable for even very large numbers of identifiers.
See [TLBoRM: Counting (Enum counting)](https://danielkeep.github.io/tlborm/book/blk-counting.html#enum-counting).
## Examples
```rust
# #[macro_use(count_idents_enum, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: u32 = count_idents_enum!(Silly swingers get your feeling under spell);
assert_eq!(NUM, 7);
# }
```
*/
#[macro_export]
macro_rules! count_idents_enum {
($($idents:ident)*) => {tlborm_util!(@count_idents_enum $($idents)*)};
}
/**
Expands to the number of token trees provided. The expansion is suitable for use in a constant expression, and is of type `usize`.
This macro is limited to input of approximately 500 tokens, but efficiently expands in a single pass. This makes it useful in recursion-limited contexts, or when you want fast expansion of small inputs.
See [TLBoRM: Counting (Repetition with replacement)](https://danielkeep.github.io/tlborm/book/blk-counting.html#repetition-with-replacement).
## Examples
```rust
# #[macro_use(count_tts_flat, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: usize = count_tts_flat!(Everybody's rhythm mad (and I love that rhythm too!));
assert_eq!(NUM, 5);
# }
```
*/
#[macro_export]
macro_rules! count_tts_flat {
($($tts:tt)*) => {tlborm_util!(@count_tts_flat $($tts)*)};
}
/**
Expands to the number of token trees provided. The expansion is suitable for use in a constant expression, and is of type `usize`.
This macro is limited to input of approximately 1,200 tokens, but requires multiple recursive expansion passes. This macro is useful when you need to count a large number of things *and* you need the result to be a compile-time constant.
See [TLBoRM: Counting (Recursion)](https://danielkeep.github.io/tlborm/book/blk-counting.html#recursion).
## Examples
```rust
# #[macro_use(count_tts_recur, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: usize = count_tts_recur!(De l'enfer au paradis!);
assert_eq!(NUM, 6);
# }
```
*/
#[macro_export]
macro_rules! count_tts_recur {
($($tts:tt)*) => {tlborm_util!(@count_tts_recur $($tts)*)};
}
/**
Expands to the number of token trees provided. The expansion is **not** suitable for use in a constant expression, though it should be optimised to a simple integer constant in release builds.
This macro has no practical limit (and has been tested to over 10,000 tokens).
See [TLBoRM: Counting (Slice length)](https://danielkeep.github.io/tlborm/book/blk-counting.html#slice-length).
## Examples
```rust
# #[macro_use(count_tts_slice, tlborm_util)] extern crate tlborm;
# fn main() {
let num = count_tts_slice!(You have no idea how tedious this is! #examplesrhard);
assert_eq!(num, 11);
# }
```
*/
#[macro_export]
macro_rules! count_tts_slice {
($($tts:tt)*) => {tlborm_util!(@count_tts_slice $($tts)*)};
}
/**
Expands to an invocation of the `$callback` macro, with a list of the unitary variant names of the provided enum separated by commas. The invocation's argument will be prefixed by the contents of `$arg`.
If `$arg` is of the form `{…}`, then the expansion will be parsed as one or more items. If it is of the form `(…)`, the expansion will be parsed as an expression.
See [TLBoRM: Enum Parsing](https://danielkeep.github.io/tlborm/book/blk-enum-parsing.html).
## Examples
```rust
# #[macro_use(parse_unitary_variants, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! variant_list {
(sep: $sep:tt, ($($var:ident),*)) => {
concat!($(stringify!($var), $sep,)*)
}
}
const LIST: &'static str = parse_unitary_variants!(
enum Currency { Trenni, Phiring, Ryut, FakeMarinne, Faram, SoManyCoins }
=> variant_list(sep: ", ", )
);
assert_eq!(LIST, "Trenni, Phiring, Ryut, FakeMarinne, Faram, SoManyCoins, ");
# }
```
*/
#[macro_export]
macro_rules! parse_unitary_variants {
(
enum $name:ident {$($body:tt)*} => $callback:ident $arg:tt
) => {
tlborm_util! {
@parse_unitary_variants
enum $name {$($body)*} => $callback $arg
}
};
}
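// A hedged sketch (not taken from the crate's own docs) of the `{…}` callback form
// described above, where the callback's expansion is parsed as an item; the
// `variant_count` callback name is made up for illustration:
//
//     macro_rules! variant_count {
//         (($($var:ident),*)) => {
//             const VARIANT_COUNT: u32 = count_idents_enum!($($var)*);
//         }
//     }
//
//     parse_unitary_variants!(
//         enum Flag { A, B, C } => variant_count {}
//     );
//     // VARIANT_COUNT is 3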
/**
Utility macro that takes a token tree and an expression, expanding to the expression.
This is typically used to replace elements of an arbitrary token sequence with some fixed expression.
See [TLBoRM: Repetition replacement](https://danielkeep.github.io/tlborm/book/pat-repetition-replacement.html).
## Examples
```rust
# #[macro_use(replace_expr, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! tts_to_zeroes {
($($tts:tt)*) => {
[$(replace_expr!($tts 0)),*]
}
}
assert_eq!(tts_to_zeroes!(pub const unsafe impl), [0, 0, 0, 0]);
# }
```
*/
#[macro_export]
macro_rules! replace_expr {
($_t:tt $sub:expr) => {tlborm_util!(@replace_expr $_t $sub)};
}
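// `tlborm_util` below is the crate's single internal dispatcher: most of the public
// macros above forward to one of its `@`-prefixed rules, which is why the crate docs
// insist that selective imports always include `tlborm_util`.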
#[doc(hidden)]
#[macro_export]
macro_rules! tlborm_util {
(@as_expr $e:expr) => {$e};
(@as_item $($i:item)+) => {$($i)+};
(@as_pat $p:pat) => {$p};
(@as_stmt $s:stmt) => {$s};
(@count_idents_enum $($idents:ident)*) => {
{
#[allow(dead_code, non_camel_case_types)]
enum Idents { $($idents,)* __CountIdentsLast }
const COUNT: u32 = Idents::__CountIdentsLast as u32;
COUNT
}
};
(@count_tts_flat $($tts:tt)*) => {0usize $(+ tlborm_util!(@replace_expr $tts 1usize))*};
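// The @count_tts_recur rules below peel off 20, 10, 5, or 1 token trees per step and
// add the matching constant, keeping the recursion depth low for larger inputs.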
(@count_tts_recur
$_a:tt $_b:tt $_c:tt $_d:tt $_e:tt
$_f:tt $_g:tt $_h:tt $_i:tt $_j:tt
$_k:tt $_l:tt $_m:tt $_n:tt $_o:tt
$_p:tt $_q:tt $_r:tt $_s:tt $_t:tt
$($tail:tt)*)
=> {20usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur
$_a:tt $_b:tt $_c:tt $_d:tt $_e:tt
$_f:tt $_g:tt $_h:tt $_i:tt $_j:tt
$($tail:tt)*)
=> {10usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur
$_a:tt $_b:tt $_c:tt $_d:tt $_e:tt
$($tail:tt)*)
=> {5usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur
$_a:tt
$($tail:tt)*)
=> {1usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur) => {0usize};
(@count_tts_slice $($tts:tt)*)
=> {<[()]>::len(&[$(tlborm_util!(@replace_expr $tts ())),*])};
(@replace_expr $_t:tt $sub:expr) => {$sub};
// ========================================================================
// @parse_unitary_variants
(
@parse_unitary_variants
enum $name:ident {$($body:tt)*} => $callback:ident $arg:tt
) => {
tlborm_util! {
@collect_unitary_variants
($callback $arg), ($($body)*,) -> ()
}
};
// ========================================================================
// @collect_unitary_variants
// Exit rules.
(
@collect_unitary_variants ($callback:ident ( $($args:tt)* )),
($(,)*) -> ($($var_names:ident,)*)
) => {
tlborm_util! {
@as_expr
$callback!{ $($args)* ($($var_names),*) }
}
};
(
@collect_unitary_variants ($callback:ident { $($args:tt)* }),
($(,)*) -> ($($var_names:ident,)*)
) => {
tlborm_util! {
@as_item
$callback!{ $($args)* ($($var_names),*) }
}
};
// Consume an attribute.
(
@collect_unitary_variants $fixed:tt,
(#[$_attr:meta] $($tail:tt)*) -> ($($var_names:tt)*)
) => {
tlborm_util! {
@collect_unitary_variants $fixed,
($($tail)*) -> ($($var_names)*)
}
};
// Handle a variant, optionally with an initialiser.
(
@collect_unitary_variants $fixed:tt,
($var:ident $(= $_val:expr)*, $($tail:tt)*) -> ($($var_names:tt)*)
) => {
tlborm_util! {
@collect_unitary_variants $fixed,
($($tail)*) -> ($($var_names)* $var,)
}
};
// Abort on variant with a payload.
(
@collect_unitary_variants $fixed:tt,
($var:ident $_struct:tt, $($tail:tt)*) -> ($($var_names:tt)*)
) => {
const _error: () = "cannot parse unitary variants from enum with non-unitary variants";
};
}
athena_cli.py | import argparse
import atexit
import csv
import json
import os
import readline
import subprocess
import sys
import time
import uuid
import boto3
import botocore
import cmd2 as cmd
from botocore.exceptions import ClientError, ParamValidationError
from tabulate import tabulate
LESS = "less -FXRSn"
HISTORY_FILE_SIZE = 500
__version__ = '0.1.8'
class AthenaBatch(object):
def __init__(self, athena, db=None, format='CSV'):
self.athena = athena
self.dbname = db
self.format = format
def execute(self, statement):
execution_id = self.athena.start_query_execution(self.dbname, statement)
if not execution_id:
return
while True:
stats = self.athena.get_query_execution(execution_id)
status = stats['QueryExecution']['Status']['State']
if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
break
time.sleep(0.2) # 200ms
if status == 'SUCCEEDED':
results = self.athena.get_query_results(execution_id)
headers = [h['Name'].encode("utf-8") for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
if self.format in ['CSV', 'CSV_HEADER']:
csv_writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
if self.format == 'CSV_HEADER':
csv_writer.writerow(headers)
csv_writer.writerows([[text.encode("utf-8") for text in row] for row in self.athena.yield_rows(results, headers)])
elif self.format == 'TSV':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], tablefmt='tsv'))
elif self.format == 'TSV_HEADER':
print(tabulate([row for row in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='tsv'))
elif self.format == 'VERTICAL':
for num, row in enumerate(self.athena.yield_rows(results, headers)):
print('--[RECORD {}]--'.format(num+1))
print(tabulate(zip(*[headers, row]), tablefmt='presto'))
else: # ALIGNED
print(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto'))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
try:
del cmd.Cmd.do_show # "show" is an Athena command
except AttributeError:
# "show" was removed from Cmd2 0.8.0
pass
class AthenaShell(cmd.Cmd, object):
multilineCommands = ['WITH', 'SELECT', 'ALTER', 'CREATE', 'DESCRIBE', 'DROP', 'MSCK', 'SHOW', 'USE', 'VALUES']
allow_cli_args = False
def __init__(self, athena, db=None):
cmd.Cmd.__init__(self)
self.athena = athena
self.dbname = db
self.execution_id = None
self.row_count = 0
self.set_prompt()
self.pager = os.environ.get('ATHENA_CLI_PAGER', LESS).split(' ')
self.hist_file = os.path.join(os.path.expanduser("~"), ".athena_history")
self.init_history()
def set_prompt(self):
self.prompt = 'athena:%s> ' % self.dbname if self.dbname else 'athena> '
def cmdloop_with_cancel(self, intro=None):
try:
self.cmdloop(intro)
except KeyboardInterrupt:
if self.execution_id:
self.athena.stop_query_execution(self.execution_id)
print('\n\n%s' % self.athena.console_link(self.execution_id))
print('\nQuery aborted by user')
else:
print('\r')
self.cmdloop_with_cancel(intro)
def preloop(self):
if os.path.exists(self.hist_file):
readline.read_history_file(self.hist_file)
def postloop(self):
self.save_history()
def init_history(self):
try:
readline.read_history_file(self.hist_file)
readline.set_history_length(HISTORY_FILE_SIZE)
readline.write_history_file(self.hist_file)
except IOError:
readline.write_history_file(self.hist_file)
atexit.register(self.save_history)
def save_history(self):
try:
readline.write_history_file(self.hist_file)
except IOError:
pass
def do_help(self, arg):
help_output = """
Supported commands:
QUIT
SELECT
ALTER DATABASE <schema>
ALTER TABLE <table>
CREATE DATABASE <schema>
CREATE TABLE <table>
DESCRIBE <table>
DROP DATABASE <schema>
DROP TABLE <table>
MSCK REPAIR TABLE <table>
SHOW COLUMNS FROM <table>
SHOW CREATE TABLE <table>
SHOW DATABASES [LIKE <pattern>]
SHOW PARTITIONS <table>
SHOW TABLES [IN <schema>] [<pattern>]
SHOW TBLPROPERTIES <table>
USE [<catalog>.]<schema>
VALUES row [, ...]
See http://docs.aws.amazon.com/athena/latest/ug/language-reference.html
"""
print(help_output)
def do_quit(self, arg):
print()
return -1
def do_EOF(self, arg):
return self.do_quit(arg)
def do_use(self, schema):
self.dbname = schema.rstrip(';')
self.set_prompt()
def do_set(self, arg):
try:
statement, param_name, val = arg.parsed.raw.split(None, 2)
val = val.strip()
param_name = param_name.strip().lower()
if param_name == 'debug':
self.athena.debug = cmd.cast(True, val)
except (ValueError, AttributeError):
self.do_show(arg)
super(AthenaShell, self).do_set(arg)
def default(self, line):
self.execution_id = self.athena.start_query_execution(self.dbname, line.full_parsed_statement())
if not self.execution_id:
return
while True:
stats = self.athena.get_query_execution(self.execution_id)
status = stats['QueryExecution']['Status']['State']
status_line = 'Query {0}, {1:9}'.format(self.execution_id, status)
sys.stdout.write('\r' + status_line)
sys.stdout.flush()
if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
break
time.sleep(0.2) # 200ms
sys.stdout.write('\r' + ' ' * len(status_line) + '\r') # delete query status line
sys.stdout.flush()
if status == 'SUCCEEDED':
results = self.athena.get_query_results(self.execution_id)
headers = [h['Name'] for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
row_count = len(results['ResultSet']['Rows'])
if headers and len(results['ResultSet']['Rows']) and results['ResultSet']['Rows'][0]['Data'][0].get('VarCharValue', None) == headers[0]:
row_count -= 1 # don't count header
process = subprocess.Popen(self.pager, stdin=subprocess.PIPE)
process.stdin.write(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto').encode('utf-8'))
process.communicate()
print('(%s rows)\n' % row_count)
print('Query {0}, {1}'.format(self.execution_id, status))
if status == 'FAILED':
print(stats['QueryExecution']['Status']['StateChangeReason'])
print(self.athena.console_link(self.execution_id))
submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
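# Rough cost estimate: assumes Athena's list price of $5 per TB of data scanned.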
query_cost = data_scanned / 1000000000000.0 * 5.0
print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
str(completion_date - submission_date).split('.')[0],
execution_time,
human_readable(data_scanned),
query_cost
))
class Athena(object):
def __init__(self, profile, region=None, bucket=None, debug=False, encryption=False):
self.session = boto3.Session(profile_name=profile, region_name=region)
self.athena = self.session.client('athena')
self.region = region or os.environ.get('AWS_DEFAULT_REGION', None) or self.session.region_name
self.bucket = bucket or self.default_bucket
self.debug = debug
self.encryption = encryption
@property
def default_bucket(self):
account_id = self.session.client('sts').get_caller_identity().get('Account')
return 's3://{}-query-results-{}-{}'.format(self.session.profile_name or 'aws-athena', account_id, self.region)
def start_query_execution(self, db, query):
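# Builds the result configuration (S3 output location, plus SSE-S3 encryption when
# requested) and submits the query with a fresh client request token.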
try:
if not db:
raise ValueError('Schema must be specified when session schema is not set')
result_configuration = {
'OutputLocation': self.bucket,
}
if self.encryption:
result_configuration['EncryptionConfiguration'] = {
'EncryptionOption': 'SSE_S3'
}
return self.athena.start_query_execution(
QueryString=query,
ClientRequestToken=str(uuid.uuid4()),
QueryExecutionContext={
'Database': db
},
ResultConfiguration=result_configuration
)['QueryExecutionId']
except (ClientError, ParamValidationError, ValueError) as e:
print(e)
return
def get_query_execution(self, execution_id):
try:
return self.athena.get_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
print(e)
def get_query_results(self, execution_id):
try:
results = None
paginator = self.athena.get_paginator('get_query_results')
page_iterator = paginator.paginate(
QueryExecutionId=execution_id
)
for page in page_iterator:
if results is None:
results = page
else:
results['ResultSet']['Rows'].extend(page['ResultSet']['Rows'])
except ClientError as e:
sys.exit(e)
if self.debug:
print(json.dumps(results, indent=2))
return results
def stop_query_execution(self, execution_id):
try:
return self.athena.stop_query_execution(
QueryExecutionId=execution_id
)
except ClientError as e:
sys.exit(e)
@staticmethod
def yield_rows(results, headers):
for row in results['ResultSet']['Rows']:
# https://forums.aws.amazon.com/thread.jspa?threadID=256505
if headers and row['Data'][0].get('VarCharValue', None) == headers[0]:
continue # skip header
yield [d.get('VarCharValue', 'NULL') for d in row['Data']]
def console_link(self, execution_id):
return 'https://{0}.console.aws.amazon.com/athena/home?force&region={0}#query/history/{1}'.format(self.region, execution_id)
def human_readable(size, precision=2):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 #increment the index of the suffix
size = size/1024.0 #apply the division
return "%.*f%s"%(precision, size, suffixes[suffixIndex])
def main():
parser = argparse.ArgumentParser(
prog='athena',
usage='athena [--debug] [--execute <statement>] [--output-format <format>] [--schema <schema>]'
' [--profile <profile>] [--region <region>] [--s3-bucket <bucket>] [--server-side-encryption] [--version]',
description='Athena interactive console'
)
parser.add_argument(
'--debug',
action='store_true',
help='enable debug mode'
)
parser.add_argument(
'--execute',
metavar='STATEMENT',
help='execute statement in batch mode'
)
parser.add_argument(
'--output-format',
dest='format',
help='output format for batch mode [ALIGNED, VERTICAL, CSV, TSV, CSV_HEADER, TSV_HEADER, NULL]'
)
parser.add_argument(
'--schema',
'--database',
'--db',
help='default schema'
)
parser.add_argument(
'--profile',
help='AWS profile'
)
parser.add_argument(
'--region',
help='AWS region'
)
parser.add_argument(
'--s3-bucket',
'--bucket',
dest='bucket',
help='AWS S3 bucket for query results'
)
parser.add_argument(
'--server-side-encryption',
'--encryption',
dest='encryption',
action='store_true',
help='Use server-side-encryption for query results'
)
parser.add_argument(
'--version',
action='store_true',
help='show version info and exit'
)
args = parser.parse_args()
if args.debug:
boto3.set_stream_logger(name='botocore')
if args.version:
print('Athena CLI %s' % __version__)
sys.exit()
profile = args.profile or os.environ.get('AWS_DEFAULT_PROFILE', None) or os.environ.get('AWS_PROFILE', None)
try:
athena = Athena(profile, region=args.region, bucket=args.bucket, debug=args.debug, encryption=args.encryption)
except botocore.exceptions.ClientError as e:
sys.exit(e)
if args.execute:
batch = AthenaBatch(athena, db=args.schema, format=args.format)
batch.execute(statement=args.execute)
else:
shell = AthenaShell(athena, db=args.schema)
shell.cmdloop_with_cancel()
if __name__ == '__main__':
main()